Mirror of https://github.com/openshift/installer.git, synced 2026-02-05 15:47:14 +01:00

data/data: remove terraform configs
Removes the terraform config data files, as they are no longer used.

@@ -1,168 +0,0 @@
locals {
  bootstrap_nic_ip_v4_configuration_name = "bootstrap-nic-ip-v4"
  description                            = "Created By OpenShift Installer"
  tags = merge(
    {
      "kubernetes.io_cluster.${var.cluster_id}" = "owned"
    },
    var.azure_extra_tags,
  )
}

provider "azurestack" {
  arm_endpoint    = var.azure_arm_endpoint
  subscription_id = var.azure_subscription_id
  client_id       = var.azure_client_id
  client_secret   = var.azure_client_secret
  tenant_id       = var.azure_tenant_id
}

data "azurestack_storage_account_sas" "ignition" {
  connection_string = var.storage_account.primary_connection_string
  https_only        = true

  resource_types {
    service   = false
    container = false
    object    = true
  }

  services {
    blob  = true
    queue = false
    table = false
    file  = false
  }

  start  = timestamp()
  expiry = timeadd(timestamp(), "24h")

  permissions {
    read    = true
    list    = true
    create  = false
    add     = false
    delete  = false
    process = false
    write   = false
    update  = false
  }
}

resource "azurestack_storage_container" "ignition" {
  name                  = "ignition"
  resource_group_name   = var.resource_group_name
  storage_account_name  = var.storage_account.name
  container_access_type = "private"
}

resource "local_file" "ignition_bootstrap" {
  content  = var.ignition_bootstrap
  filename = "${path.module}/ignition_bootstrap.ign"
}

resource "azurestack_storage_blob" "ignition" {
  name                   = "bootstrap.ign"
  source                 = local_file.ignition_bootstrap.filename
  resource_group_name    = var.resource_group_name
  storage_account_name   = var.storage_account.name
  storage_container_name = azurestack_storage_container.ignition.name
  type                   = "block"
}
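
# Note: the bootstrap VM below composes its Ignition fetch URL by concatenating
# this blob's URL with the short-lived (24h) SAS token computed above; the
# `sas` attribute is a query string, so the composed value has the rough
# (hypothetical) shape:
#   https://<account>.blob.<arm-endpoint>/ignition/bootstrap.ign?sv=...&sig=...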

resource "azurestack_public_ip" "bootstrap_public_ip_v4" {
  count = var.azure_private ? 0 : 1

  location                     = var.azure_region
  name                         = "${var.cluster_id}-bootstrap-pip-v4"
  resource_group_name          = var.resource_group_name
  public_ip_address_allocation = "Static"
}

data "azurestack_public_ip" "bootstrap_public_ip_v4" {
  count = var.azure_private ? 0 : 1

  name                = azurestack_public_ip.bootstrap_public_ip_v4[0].name
  resource_group_name = var.resource_group_name
}

resource "azurestack_network_interface" "bootstrap" {
  name                = "${var.cluster_id}-bootstrap-nic"
  location            = var.azure_region
  resource_group_name = var.resource_group_name

  ip_configuration {
    primary                       = true
    name                          = local.bootstrap_nic_ip_v4_configuration_name
    subnet_id                     = var.master_subnet_id
    private_ip_address_allocation = "Dynamic"
    public_ip_address_id          = var.azure_private ? null : azurestack_public_ip.bootstrap_public_ip_v4[0].id
    load_balancer_backend_address_pools_ids = concat(
      [var.ilb_backend_pool_v4_id],
      ! var.azure_private ? [var.elb_backend_pool_v4_id] : []
    )
  }
}

resource "azurestack_virtual_machine" "bootstrap" {
  name                  = "${var.cluster_id}-bootstrap"
  location              = var.azure_region
  resource_group_name   = var.resource_group_name
  network_interface_ids = [azurestack_network_interface.bootstrap.id]
  vm_size               = var.azure_master_vm_type
  availability_set_id   = var.availability_set_id

  os_profile {
    computer_name  = "${var.cluster_id}-bootstrap-vm"
    admin_username = "core"
    # The password is normally applied by WALA (the Azure agent), but this
    # isn't installed in RHCOS. As a result, this password is never set. It is
    # included here because it is required by the Azure ARM API.
    admin_password = "NotActuallyApplied!"

    custom_data = base64encode(replace(var.azure_bootstrap_ignition_stub,
      var.azure_bootstrap_ignition_url_placeholder,
      "${azurestack_storage_blob.ignition.url}${data.azurestack_storage_account_sas.ignition.sas}"))
  }

  os_profile_linux_config {
    disable_password_authentication = false
  }

  storage_image_reference {
    id = var.vm_image
  }

  storage_os_disk {
    name              = "${var.cluster_id}-bootstrap_OSDisk" # os disk name needs to match cluster-api convention
    create_option     = "FromImage"
    disk_size_gb      = 100
    managed_disk_type = var.azure_master_root_volume_type
  }

  boot_diagnostics {
    enabled     = true
    storage_uri = var.storage_account.primary_blob_endpoint
  }

  # Workaround for bug in provider where destroy fails by trying to delete NIC before VM.
  # This depends_on ensures the VM is destroyed before the NIC.
  depends_on = [
    azurestack_network_interface.bootstrap
  ]
}

resource "azurestack_network_security_rule" "bootstrap_ssh_in" {
  name                        = "bootstrap_ssh_in"
  priority                    = 103
  direction                   = "Inbound"
  access                      = "Allow"
  protocol                    = "Tcp"
  source_port_range           = "*"
  destination_port_range      = "22"
  source_address_prefix       = "*"
  destination_address_prefix  = "*"
  resource_group_name         = var.resource_group_name
  network_security_group_name = var.nsg_name
  description                 = local.description
}

@@ -1,3 +0,0 @@
output "bootstrap_ip" {
  value = var.azure_private ? azurestack_network_interface.bootstrap.private_ip_address : azurestack_public_ip.bootstrap_public_ip_v4[0].ip_address
}

@@ -1,40 +0,0 @@
variable "elb_backend_pool_v4_id" {
  type        = string
  default     = null
  description = "The external load balancer backend pool ID, used to attach the bootstrap NIC."
}

variable "ilb_backend_pool_v4_id" {
  type        = string
  description = "The internal load balancer backend pool ID, used to attach the bootstrap NIC."
}

variable "master_subnet_id" {
  type        = string
  description = "The subnet ID for the bootstrap node."
}

variable "nsg_name" {
  type        = string
  description = "The network security group for the subnet."
}

variable "resource_group_name" {
  type        = string
  description = "The resource group name for the deployment."
}

variable "storage_account" {
  type        = any
  description = "The storage account for the cluster. It can be used for boot diagnostics."
}

variable "vm_image" {
  type        = string
  description = "The URI of the VM image used for bootstrap."
}

variable "availability_set_id" {
  type        = string
  description = "ID of the availability set in which to place VMs."
}

@@ -1,22 +0,0 @@
locals {
  // extracting <clustername> from <clusterdomain>
  cluster_name = replace(var.cluster_domain, ".${var.base_domain}", "")
}

resource "azurestack_dns_a_record" "api_external_v4" {
  name                = "api.${local.cluster_name}"
  zone_name           = var.base_domain
  resource_group_name = var.base_domain_resource_group_name
  ttl                 = 300
  records             = var.private ? [var.ilb_ipaddress_v4] : [var.elb_pip_v4]
  tags                = var.tags
}

resource "azurestack_dns_a_record" "api_internal_v4" {
  name                = "api-int.${local.cluster_name}"
  zone_name           = var.base_domain
  resource_group_name = var.base_domain_resource_group_name
  ttl                 = 300
  records             = [var.ilb_ipaddress_v4]
  tags                = var.tags
}

@@ -1,55 +0,0 @@
variable "tags" {
  type        = map(string)
  default     = {}
  description = "Tags to be applied to created resources."
}

variable "cluster_id" {
  description = "The identifier for the cluster."
  type        = string
}

variable "cluster_domain" {
  description = "The domain for the cluster to which all DNS records must belong."
  type        = string
}

variable "base_domain" {
  description = "The base domain used for public records."
  type        = string
}

variable "base_domain_resource_group_name" {
  description = "The resource group where the base domain is."
  type        = string
}

variable "elb_fqdn_v4" {
  description = "The external API load balancer FQDN for IPv4."
  type        = string
}

variable "elb_pip_v4" {
  description = "The public IP address of the external API load balancer."
  type        = string
}

variable "ilb_ipaddress_v4" {
  description = "The internal API load balancer IPv4 address."
  type        = string
}

variable "virtual_network_id" {
  description = "The ID of the virtual network that will be linked to the private DNS zone."
  type        = string
}

variable "resource_group_name" {
  type        = string
  description = "Resource group for the deployment."
}

variable "private" {
  type        = bool
  description = "Determines whether this is a private cluster."
}

@@ -1,52 +0,0 @@
locals {
  tags = merge(
    {
      "kubernetes.io_cluster.${var.cluster_id}" = "owned"
    },
    var.azure_extra_tags,
  )
  description = "Created By OpenShift Installer"
}

provider "azurestack" {
  arm_endpoint    = var.azure_arm_endpoint
  subscription_id = var.azure_subscription_id
  client_id       = var.azure_client_id
  client_secret   = var.azure_client_secret
  tenant_id       = var.azure_tenant_id
}

module "master" {
  source                 = "./master"
  resource_group_name    = var.resource_group_name
  cluster_id             = var.cluster_id
  region                 = var.azure_region
  vm_size                = var.azure_master_vm_type
  vm_image_uri           = var.vm_image
  ignition               = var.ignition_master
  elb_backend_pool_v4_id = var.elb_backend_pool_v4_id
  ilb_backend_pool_v4_id = var.ilb_backend_pool_v4_id
  subnet_id              = var.master_subnet_id
  instance_count         = var.master_count
  storage_account        = var.storage_account
  os_volume_type         = var.azure_master_root_volume_type
  os_volume_size         = var.azure_master_root_volume_size
  private                = var.azure_private
  availability_set_id    = var.availability_set_id
}

module "dns" {
  source                          = "./dns"
  cluster_domain                  = var.cluster_domain
  cluster_id                      = var.cluster_id
  base_domain                     = var.base_domain
  virtual_network_id              = var.virtual_network_id
  elb_fqdn_v4                     = var.elb_pip_v4_fqdn
  elb_pip_v4                      = var.elb_pip_v4
  ilb_ipaddress_v4                = var.ilb_ip_v4_address
  resource_group_name             = var.resource_group_name
  base_domain_resource_group_name = var.azure_base_domain_resource_group_name
  private                         = var.azure_private
  tags                            = local.tags
}

@@ -1,65 +0,0 @@
locals {
  // The name of the masters' ipconfiguration is hardcoded to "pipconfig". It needs to match cluster-api
  // https://github.com/openshift/cluster-api-provider-azure/blob/master/pkg/cloud/azure/services/networkinterfaces/networkinterfaces.go#L131
  ip_v4_configuration_name = "pipConfig"
}

resource "azurestack_network_interface" "master" {
  count = var.instance_count

  name                = "${var.cluster_id}-master-${count.index}-nic"
  location            = var.region
  resource_group_name = var.resource_group_name

  ip_configuration {
    primary                       = true
    name                          = local.ip_v4_configuration_name
    subnet_id                     = var.subnet_id
    private_ip_address_allocation = "Dynamic"
    load_balancer_backend_address_pools_ids = concat(
      [var.ilb_backend_pool_v4_id],
      ! var.private ? [var.elb_backend_pool_v4_id] : []
    )
  }
}

resource "azurestack_virtual_machine" "master" {
  count = var.instance_count

  name                  = "${var.cluster_id}-master-${count.index}"
  location              = var.region
  resource_group_name   = var.resource_group_name
  network_interface_ids = [element(azurestack_network_interface.master.*.id, count.index)]
  vm_size               = var.vm_size
  availability_set_id   = var.availability_set_id

  os_profile {
    computer_name  = "${var.cluster_id}-master-${count.index}"
    admin_username = "core"
    # The password is normally applied by WALA (the Azure agent), but this
    # isn't installed in RHCOS. As a result, this password is never set. It is
    # included here because it is required by the Azure ARM API.
    admin_password = "NotActuallyApplied!"
    custom_data    = base64encode(var.ignition)
  }

  os_profile_linux_config {
    disable_password_authentication = false
  }

  storage_image_reference {
    id = var.vm_image_uri
  }

  storage_os_disk {
    name              = "${var.cluster_id}-master-${count.index}_OSDisk" # os disk name needs to match cluster-api convention
    create_option     = "FromImage"
    disk_size_gb      = var.os_volume_size
    managed_disk_type = var.os_volume_type
  }

  boot_diagnostics {
    enabled     = true
    storage_uri = var.storage_account.primary_blob_endpoint
  }
}

@@ -1,3 +0,0 @@
output "ip_addresses" {
  value = azurestack_network_interface.master.*.private_ip_address
}

@@ -1,74 +0,0 @@
variable "region" {
  type        = string
  description = "The region for the deployment."
}

variable "resource_group_name" {
  type        = string
  description = "The resource group name for the deployment."
}

variable "cluster_id" {
  type = string
}

variable "vm_size" {
  type = string
}

variable "vm_image_uri" {
  type        = string
  description = "The URI of the VM image used for masters."
}

variable "instance_count" {
  type = string
}

variable "elb_backend_pool_v4_id" {
  type = string
}

variable "ilb_backend_pool_v4_id" {
  type = string
}

variable "subnet_id" {
  type        = string
  description = "The subnet to attach the masters to."
}

variable "os_volume_type" {
  type        = string
  description = "The type of the volume for the root block device."
}

variable "os_volume_size" {
  type        = string
  description = "The size of the volume in gigabytes for the root block device."
}

variable "tags" {
  type        = map(string)
  default     = {}
  description = "Tags to be applied to created resources."
}

variable "storage_account" {
  type        = any
  description = "The storage account for the cluster. It can be used for boot diagnostics."
}

variable "ignition" {
  type = string
}

variable "private" {
  type        = bool
  description = "Determines whether this is a private cluster."
}

variable "availability_set_id" {
  type        = string
  description = "ID of the availability set in which to place VMs."
}

@@ -1,3 +0,0 @@
output "control_plane_ips" {
  value = module.master.ip_addresses
}

@@ -1,65 +0,0 @@
variable "elb_backend_pool_v4_id" {
  type        = string
  default     = null
  description = "The external load balancer backend pool ID, used to attach the bootstrap NIC."
}

variable "ilb_backend_pool_v4_id" {
  type        = string
  default     = null
  description = "The internal load balancer backend pool ID, used to attach the bootstrap NIC."
}

variable "elb_pip_v4" {
  type    = string
  default = null
}

variable "elb_pip_v4_fqdn" {
  type    = string
  default = null
}

variable "ilb_ip_v4_address" {
  type = string
}

variable "virtual_network_id" {
  description = "The ID of the virtual network that will be linked to the private DNS zone."
  type        = string
}

variable "master_subnet_id" {
  type        = string
  description = "The subnet ID for the bootstrap node."
}

variable "nsg_name" {
  type        = string
  description = "The network security group for the subnet."
}

variable "resource_group_name" {
  type        = string
  description = "The resource group name for the deployment."
}

variable "storage_account" {
  type        = any
  description = "The storage account for the cluster. It can be used for boot diagnostics."
}

variable "vm_image" {
  type        = string
  description = "The resource ID of the VM image used for bootstrap."
}

variable "availability_set_id" {
  type        = string
  description = "ID of the availability set in which to place VMs."
}

variable "bootstrap_ip" {
  type        = string
  description = "The IP of the bootstrap node. Used for log gathering but not for infrastructure provisioning."
}

@@ -1,300 +0,0 @@
variable "azure_environment" {
  type        = string
  description = "The target Azure cloud environment for the cluster."
}

variable "azure_region" {
  type        = string
  description = "The target Azure region for the cluster."
}

variable "azure_master_vm_type" {
  type        = string
  description = "Instance type for the master node(s). Example: `Standard_D8s_v3`."
}

variable "azure_master_disk_encryption_set_id" {
  type        = string
  default     = null
  description = "The ID of the Disk Encryption Set which should be used to encrypt the OS disk for the master node(s)."
}

variable "azure_master_encryption_at_host_enabled" {
  type        = bool
  description = "Enables encryption at the VM host for the master node(s)."
}

variable "azure_extra_tags" {
  type = map(string)

  description = <<EOF
(optional) Extra Azure tags to be applied to created resources.

Example: `{ "key" = "value", "foo" = "bar" }`
EOF

  default = {}
}

variable "azure_master_root_volume_type" {
  type        = string
  description = "The type of the volume for the root block device of master nodes."
}

variable "azure_master_root_volume_size" {
  type        = string
  description = "The size of the volume in gigabytes for the root block device of master nodes."
}

variable "azure_control_plane_ultra_ssd_enabled" {
  type        = bool
  description = "Determines if the control plane should have UltraSSD enabled."
}

variable "azure_base_domain_resource_group_name" {
  type        = string
  default     = ""
  description = "The resource group that contains the DNS zone used as the base domain for the cluster."
}

variable "azure_image_url" {
  type        = string
  description = "The URL of the VM image used for all nodes."
}

variable "azure_arm_endpoint" {
  type        = string
  description = "The endpoint for the Azure API. Only used when installing to Azure Stack."
}

variable "azure_bootstrap_ignition_stub" {
  type        = string
  description = "The bootstrap Ignition stub. Only used when installing to Azure Stack."
}

variable "azure_bootstrap_ignition_url_placeholder" {
  type        = string
  description = <<EOF
The placeholder value in the bootstrap Ignition to be replaced with the Ignition URL.
Only used when installing to Azure Stack.
EOF
}

variable "azure_subscription_id" {
  type        = string
  description = "The subscription that should be used to interact with the Azure API."
}

variable "azure_client_id" {
  type        = string
  description = "The app ID that should be used to interact with the Azure API."
  default     = ""
}

variable "azure_client_secret" {
  type        = string
  description = "The password that should be used to interact with the Azure API."
  default     = ""
}

variable "azure_certificate_path" {
  type        = string
  description = "The location of the Azure service principal client certificates."
  default     = ""
}

variable "azure_certificate_password" {
  type        = string
  description = "The password for the provided Azure service principal client certificates."
  default     = ""
}

variable "azure_tenant_id" {
  type        = string
  description = "The tenant ID that should be used to interact with the Azure API."
}

variable "azure_use_msi" {
  type        = bool
  default     = false
  description = "Specifies whether to use a managed identity for authentication."
}

variable "azure_master_availability_zones" {
  type        = list(string)
  description = "The availability zones in which to create the masters. The length of this list must match master_count."
}

variable "azure_preexisting_network" {
  type        = bool
  default     = false
  description = "Specifies whether an existing network should be used or a new one created for installation."
}

variable "azure_resource_group_name" {
  type        = string
  description = <<EOF
The name of the resource group for the cluster. If this is set, the cluster is installed to that existing resource group;
otherwise a new resource group will be created using the cluster ID.
EOF
}

variable "azure_network_resource_group_name" {
  type        = string
  description = "The name of the network resource group, either existing or to be created."
}

variable "azure_virtual_network" {
  type        = string
  description = "The name of the virtual network, either existing or to be created."
}

variable "azure_control_plane_subnet" {
  type        = string
  description = "The name of the subnet for the control plane, either existing or to be created."
}

variable "azure_compute_subnet" {
  type        = string
  description = "The name of the subnet for worker nodes, either existing or to be created."
}

variable "azure_private" {
  type        = bool
  description = "Determines whether this is a private cluster."
}

variable "azure_outbound_routing_type" {
  type    = string
  default = "Loadbalancer"

  description = <<EOF
This determines the routing that will be used for egress to the Internet.
When not set, a standard load balancer will be used for egress to the Internet.
EOF
}

variable "azure_hypervgeneration_version" {
  type        = string
  description = <<EOF
This determines the HyperVGeneration disk type to use for the control plane VMs.
EOF
}

variable "azure_control_plane_vm_networking_type" {
  type        = bool
  description = "Whether to enable accelerated networking on control plane nodes."
}

variable "random_storage_account_suffix" {
  type        = string
  description = "A random string generated to add a suffix to the storage account and blob."
}

variable "azure_vm_architecture" {
  type        = string
  description = "Architecture of the VMs; used when creating images in the image gallery."
}

variable "azure_image_release" {
  type        = string
  description = "RHCOS release image version; used when creating the image definition in the gallery."
}

variable "azure_use_marketplace_image" {
  type        = bool
  description = "Whether to use a Marketplace image for all nodes."
}

variable "azure_marketplace_image_has_plan" {
  type        = bool
  description = "Whether the Marketplace image has a purchase plan."
}

variable "azure_marketplace_image_publisher" {
  type        = string
  description = "Publisher of the Marketplace image."
  default     = ""
}

variable "azure_marketplace_image_offer" {
  type        = string
  description = "Offer of the Marketplace image."
  default     = ""
}

variable "azure_marketplace_image_sku" {
  type        = string
  description = "SKU of the Marketplace image."
  default     = ""
}

variable "azure_marketplace_image_version" {
  type        = string
  description = "Version of the Marketplace image."
  default     = ""
}

variable "azure_master_security_encryption_type" {
  type    = string
  default = null

  description = <<EOF
Defines the encryption type when the virtual machine is a Confidential VM. Possible values are VMGuestStateOnly and DiskWithVMGuestState.
When set to "VMGuestStateOnly", azure_master_vtpm_enabled should be set to true.
When set to "DiskWithVMGuestState", both azure_master_vtpm_enabled and azure_master_secure_boot_enabled should be true.
EOF
}

variable "azure_master_secure_vm_disk_encryption_set_id" {
  type    = string
  default = null

  description = <<EOF
Defines the ID of the Disk Encryption Set which should be used to encrypt this OS disk when the virtual machine is a Confidential VM.
It can only be set when azure_master_security_encryption_type is set to "DiskWithVMGuestState".
EOF
}

variable "azure_master_secure_boot" {
  type        = string
  description = "Defines whether the instance should have secure boot enabled."
  default     = ""
}

variable "azure_master_virtualized_trusted_platform_module" {
  type        = string
  description = "Defines whether the instance should have vTPM enabled."
  default     = ""
}

variable "azure_keyvault_resource_group" {
  type        = string
  description = "Defines the resource group of the key vault used for storage account encryption."
  default     = ""
}

variable "azure_keyvault_name" {
  type        = string
  description = "Defines the name of the key vault used for storage account encryption."
  default     = ""
}

variable "azure_keyvault_key_name" {
  type        = string
  description = "Defines the key in the key vault used for storage account encryption."
  default     = ""
}

variable "azure_user_assigned_identity_key" {
  type        = string
  description = "Defines the user identity key used for storage account encryption."
  default     = ""
}

variable "azure_resource_group_metadata_tags" {
  type        = map(string)
  description = "Metadata Azure tags to be applied to the cluster resource group."
  default     = {}
}

@@ -1,40 +0,0 @@
# Canonical internal state definitions for this module.
# read-only: only locals and data source definitions allowed. No resources or module blocks in this file.

data "azurestack_subnet" "preexisting_master_subnet" {
  count = var.azure_preexisting_network ? 1 : 0

  resource_group_name  = var.azure_network_resource_group_name
  virtual_network_name = var.azure_virtual_network
  name                 = var.azure_control_plane_subnet
}

data "azurestack_subnet" "preexisting_worker_subnet" {
  count = var.azure_preexisting_network ? 1 : 0

  resource_group_name  = var.azure_network_resource_group_name
  virtual_network_name = var.azure_virtual_network
  name                 = var.azure_compute_subnet
}

data "azurestack_virtual_network" "preexisting_virtual_network" {
  count = var.azure_preexisting_network ? 1 : 0

  resource_group_name = var.azure_network_resource_group_name
  name                = var.azure_virtual_network
}

// Only reference data sources which are guaranteed to exist at any time (above) in this locals{} block
locals {
  master_subnet_cidr_v4 = var.use_ipv4 ? cidrsubnet(var.machine_v4_cidrs[0], 1, 0) : null  # master subnet is a smaller subnet within the vnet, i.e. from /16 to /17
  master_subnet_cidr_v6 = var.use_ipv6 ? cidrsubnet(var.machine_v6_cidrs[0], 16, 0) : null # master subnet is a smaller subnet within the vnet, i.e. from /48 to /64

  worker_subnet_cidr_v4 = var.use_ipv4 ? cidrsubnet(var.machine_v4_cidrs[0], 1, 1) : null  # node subnet is a smaller subnet within the vnet, i.e. from /16 to /17
  worker_subnet_cidr_v6 = var.use_ipv6 ? cidrsubnet(var.machine_v6_cidrs[0], 16, 1) : null # node subnet is a smaller subnet within the vnet, i.e. from /48 to /64

  master_subnet_id = var.azure_preexisting_network ? data.azurestack_subnet.preexisting_master_subnet[0].id : azurestack_subnet.master_subnet[0].id
  worker_subnet_id = var.azure_preexisting_network ? data.azurestack_subnet.preexisting_worker_subnet[0].id : azurestack_subnet.worker_subnet[0].id

  virtual_network    = var.azure_preexisting_network ? data.azurestack_virtual_network.preexisting_virtual_network[0].name : azurestack_virtual_network.cluster_vnet[0].name
  virtual_network_id = var.azure_preexisting_network ? data.azurestack_virtual_network.preexisting_virtual_network[0].id : azurestack_virtual_network.cluster_vnet[0].id
}
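
For reference, a minimal sketch of what those cidrsubnet() calls evaluate to, using a hypothetical 10.0.0.0/16 machine network (the names below are illustrative only):

locals {
  example_machine_cidr = "10.0.0.0/16"
  example_master_cidr  = cidrsubnet(local.example_machine_cidr, 1, 0) # => "10.0.0.0/17"
  example_worker_cidr  = cidrsubnet(local.example_machine_cidr, 1, 1) # => "10.0.128.0/17"
}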

@@ -1,71 +0,0 @@
locals {
  internal_lb_frontend_ip_v4_configuration_name = "internal-lb-ip-v4"
}

resource "azurestack_lb" "internal" {
  name                = "${var.cluster_id}-internal"
  resource_group_name = data.azurestack_resource_group.main.name
  location            = var.azure_region

  frontend_ip_configuration {
    name                          = local.internal_lb_frontend_ip_v4_configuration_name
    subnet_id                     = local.master_subnet_id
    private_ip_address_allocation = "Dynamic"
  }
}

resource "azurestack_lb_backend_address_pool" "internal_lb_controlplane_pool_v4" {
  resource_group_name = data.azurestack_resource_group.main.name
  loadbalancer_id     = azurestack_lb.internal.id
  name                = var.cluster_id
}

resource "azurestack_lb_rule" "internal_lb_rule_api_internal_v4" {
  name                           = "api-internal-v4"
  resource_group_name            = data.azurestack_resource_group.main.name
  protocol                       = "Tcp"
  backend_address_pool_id        = azurestack_lb_backend_address_pool.internal_lb_controlplane_pool_v4.id
  loadbalancer_id                = azurestack_lb.internal.id
  frontend_port                  = 6443
  backend_port                   = 6443
  frontend_ip_configuration_name = local.internal_lb_frontend_ip_v4_configuration_name
  enable_floating_ip             = false
  idle_timeout_in_minutes        = 30
  load_distribution              = "Default"
  probe_id                       = azurestack_lb_probe.internal_lb_probe_api_internal.id
}
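
# Port 22623 below serves the Machine Config Server, which new nodes contact
# to fetch their rendered Ignition configs; in this config it is exposed only
# on the internal load balancer (the public LB fronts only 6443).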
resource "azurestack_lb_rule" "internal_lb_rule_sint_v4" {
  name                           = "sint-v4"
  resource_group_name            = data.azurestack_resource_group.main.name
  protocol                       = "Tcp"
  backend_address_pool_id        = azurestack_lb_backend_address_pool.internal_lb_controlplane_pool_v4.id
  loadbalancer_id                = azurestack_lb.internal.id
  frontend_port                  = 22623
  backend_port                   = 22623
  frontend_ip_configuration_name = local.internal_lb_frontend_ip_v4_configuration_name
  enable_floating_ip             = false
  idle_timeout_in_minutes        = 30
  load_distribution              = "Default"
  probe_id                       = azurestack_lb_probe.internal_lb_probe_sint.id
}

resource "azurestack_lb_probe" "internal_lb_probe_sint" {
  name                = "sint-probe"
  resource_group_name = data.azurestack_resource_group.main.name
  interval_in_seconds = 5
  number_of_probes    = 2
  loadbalancer_id     = azurestack_lb.internal.id
  port                = 22623
  protocol            = "TCP"
}

resource "azurestack_lb_probe" "internal_lb_probe_api_internal" {
  name                = "api-internal-probe"
  resource_group_name = data.azurestack_resource_group.main.name
  interval_in_seconds = 5
  number_of_probes    = 2
  loadbalancer_id     = azurestack_lb.internal.id
  port                = 6443
  protocol            = "TCP"
}

@@ -1,80 +0,0 @@
locals {
  tags = merge(
    {
      "kubernetes.io_cluster.${var.cluster_id}" = "owned"
    },
    var.azure_extra_tags,
  )
  description = "Created By OpenShift Installer"
}

provider "azurestack" {
  arm_endpoint    = var.azure_arm_endpoint
  subscription_id = var.azure_subscription_id
  client_id       = var.azure_client_id
  client_secret   = var.azure_client_secret
  tenant_id       = var.azure_tenant_id
}

resource "azurestack_resource_group" "main" {
  count = var.azure_resource_group_name == "" ? 1 : 0

  name     = "${var.cluster_id}-rg"
  location = var.azure_region
  tags     = local.tags
}

data "azurestack_resource_group" "main" {
  name = var.azure_resource_group_name == "" ? "${var.cluster_id}-rg" : var.azure_resource_group_name

  depends_on = [azurestack_resource_group.main]
}
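
# The data source above resolves to the same resource group whether it was
# created just above or supplied via azure_resource_group_name; the depends_on
# ordering lets the rest of the config reference a single name either way.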

data "azurestack_resource_group" "network" {
  count = var.azure_preexisting_network ? 1 : 0

  name = var.azure_network_resource_group_name
}

resource "azurestack_storage_account" "cluster" {
  name                     = "cluster${var.random_storage_account_suffix}"
  resource_group_name      = data.azurestack_resource_group.main.name
  location                 = var.azure_region
  account_tier             = "Standard"
  account_replication_type = "LRS"
}

# Copy the VHD over to the cluster resource group and create an image from it.
resource "azurestack_storage_container" "vhd" {
  name                 = "vhd"
  resource_group_name  = data.azurestack_resource_group.main.name
  storage_account_name = azurestack_storage_account.cluster.name
}

resource "azurestack_storage_blob" "rhcos_image" {
  name                   = "rhcos${var.random_storage_account_suffix}.vhd"
  resource_group_name    = data.azurestack_resource_group.main.name
  storage_account_name   = azurestack_storage_account.cluster.name
  storage_container_name = azurestack_storage_container.vhd.name
  type                   = "page"
  source_uri             = var.azure_image_url
}

resource "azurestack_image" "cluster" {
  name                = var.cluster_id
  resource_group_name = data.azurestack_resource_group.main.name
  location            = var.azure_region

  os_disk {
    os_type  = "Linux"
    os_state = "Generalized"
    blob_uri = azurestack_storage_blob.rhcos_image.url
  }
}

resource "azurestack_availability_set" "cluster_availability_set" {
  name                = "${var.cluster_id}-cluster"
  resource_group_name = data.azurestack_resource_group.main.name
  location            = var.azure_region
  managed             = true
}

@@ -1,33 +0,0 @@
resource "azurestack_network_security_group" "cluster" {
  name                = "${var.cluster_id}-nsg"
  location            = var.azure_region
  resource_group_name = data.azurestack_resource_group.main.name
}

resource "azurestack_network_security_rule" "apiserver_in" {
  name                        = "apiserver_in"
  priority                    = 101
  direction                   = "Inbound"
  access                      = "Allow"
  protocol                    = "Tcp"
  source_port_range           = "*"
  destination_port_range      = "6443"
  source_address_prefix       = "*"
  destination_address_prefix  = "*"
  resource_group_name         = data.azurestack_resource_group.main.name
  network_security_group_name = azurestack_network_security_group.cluster.name
  description                 = local.description
}

resource "azurestack_subnet_network_security_group_association" "master" {
  count = var.azure_preexisting_network ? 0 : 1

  subnet_id                 = azurestack_subnet.master_subnet[0].id
  network_security_group_id = azurestack_network_security_group.cluster.id
}

resource "azurestack_subnet_network_security_group_association" "worker" {
  count = var.azure_preexisting_network ? 0 : 1

  subnet_id                 = azurestack_subnet.worker_subnet[0].id
  network_security_group_id = azurestack_network_security_group.cluster.id
}

@@ -1,48 +0,0 @@
output "elb_backend_pool_v4_id" {
  value = ! var.azure_private ? azurestack_lb_backend_address_pool.public_lb_pool_v4[0].id : null
}

output "ilb_backend_pool_v4_id" {
  value = azurestack_lb_backend_address_pool.internal_lb_controlplane_pool_v4.id
}

output "elb_pip_v4_fqdn" {
  // TODO: Do we really need to get the fqdn from a data source instead of the resource?
  value = ! var.azure_private ? azurestack_public_ip.cluster_public_ip_v4[0].fqdn : null
}

output "elb_pip_v4" {
  value = ! var.azure_private ? azurestack_public_ip.cluster_public_ip_v4[0].ip_address : null
}

output "ilb_ip_v4_address" {
  value = azurestack_lb.internal.private_ip_addresses[0]
}

output "nsg_name" {
  value = azurestack_network_security_group.cluster.name
}

output "virtual_network_id" {
  value = local.virtual_network_id
}

output "master_subnet_id" {
  value = local.master_subnet_id
}

output "resource_group_name" {
  value = data.azurestack_resource_group.main.name
}

output "vm_image" {
  value = azurestack_image.cluster.id
}

output "storage_account" {
  value = azurestack_storage_account.cluster
}

output "availability_set_id" {
  value = azurestack_availability_set.cluster_availability_set.id
}

@@ -1,65 +0,0 @@
locals {
  public_lb_frontend_ip_v4_configuration_name = "public-lb-ip-v4"
}

resource "azurestack_public_ip" "cluster_public_ip_v4" {
  count = ! var.azure_private ? 1 : 0

  location                     = var.azure_region
  name                         = "${var.cluster_id}-pip-v4"
  resource_group_name          = data.azurestack_resource_group.main.name
  public_ip_address_allocation = "Static"
  domain_name_label            = var.cluster_id
}

resource "azurestack_lb" "public" {
  count = ! var.azure_private ? 1 : 0

  name                = var.cluster_id
  resource_group_name = data.azurestack_resource_group.main.name
  location            = var.azure_region

  frontend_ip_configuration {
    name                          = local.public_lb_frontend_ip_v4_configuration_name
    public_ip_address_id          = azurestack_public_ip.cluster_public_ip_v4[0].id
    private_ip_address_allocation = "Dynamic"
  }
}

resource "azurestack_lb_backend_address_pool" "public_lb_pool_v4" {
  count = ! var.azure_private ? 1 : 0

  resource_group_name = data.azurestack_resource_group.main.name
  loadbalancer_id     = azurestack_lb.public[0].id
  name                = var.cluster_id
}

resource "azurestack_lb_rule" "public_lb_rule_api_internal_v4" {
  count = ! var.azure_private ? 1 : 0

  name                           = "api-internal-v4"
  resource_group_name            = data.azurestack_resource_group.main.name
  protocol                       = "Tcp"
  backend_address_pool_id        = azurestack_lb_backend_address_pool.public_lb_pool_v4[0].id
  loadbalancer_id                = azurestack_lb.public[0].id
  frontend_port                  = 6443
  backend_port                   = 6443
  frontend_ip_configuration_name = local.public_lb_frontend_ip_v4_configuration_name
  enable_floating_ip             = false
  idle_timeout_in_minutes        = 30
  load_distribution              = "Default"
  probe_id                       = azurestack_lb_probe.public_lb_probe_api_internal[0].id
}

resource "azurestack_lb_probe" "public_lb_probe_api_internal" {
  count = ! var.azure_private ? 1 : 0

  name                = "api-internal-probe"
  resource_group_name = data.azurestack_resource_group.main.name
  interval_in_seconds = 5
  number_of_probes    = 2
  loadbalancer_id     = azurestack_lb.public[0].id
  port                = 6443
  protocol            = "TCP"
}

@@ -1,28 +0,0 @@
resource "azurestack_virtual_network" "cluster_vnet" {
  count = var.azure_preexisting_network ? 0 : 1

  name                = var.azure_virtual_network
  resource_group_name = data.azurestack_resource_group.main.name
  location            = var.azure_region
  address_space       = var.machine_v4_cidrs
}

resource "azurestack_subnet" "master_subnet" {
  count = var.azure_preexisting_network ? 0 : 1

  resource_group_name       = data.azurestack_resource_group.main.name
  address_prefix            = local.master_subnet_cidr_v4
  virtual_network_name      = local.virtual_network
  name                      = var.azure_control_plane_subnet
  network_security_group_id = azurestack_network_security_group.cluster.id
}

resource "azurestack_subnet" "worker_subnet" {
  count = var.azure_preexisting_network ? 0 : 1

  resource_group_name       = data.azurestack_resource_group.main.name
  address_prefix            = local.worker_subnet_cidr_v4
  virtual_network_name      = local.virtual_network
  name                      = var.azure_compute_subnet
  network_security_group_id = azurestack_network_security_group.cluster.id
}

@@ -1,140 +0,0 @@
terraform {
  required_version = ">= 0.14"
}

variable "machine_v4_cidrs" {
  type = list(string)

  description = <<EOF
The list of IPv4 address spaces from which to assign machine IPs.
EOF
}

variable "machine_v6_cidrs" {
  type = list(string)

  description = <<EOF
The list of IPv6 address spaces from which to assign machine IPs.
EOF
}

variable "master_count" {
  type = string

  default = "1"

  description = <<EOF
The number of master nodes to be created.
This applies only to cloud platforms.
EOF
}

variable "masters_schedulable" {
  type = bool

  default = false

  description = <<EOF
Whether master nodes are schedulable.
EOF
}

variable "base_domain" {
  type = string

  description = <<EOF
The base DNS domain of the cluster. It must NOT contain a trailing period. Some
DNS providers will automatically add this if necessary.

Example: `openshift.example.com`.

Note: This field MUST be set manually prior to creating the cluster.
This applies only to cloud platforms.
EOF
}

variable "cluster_domain" {
  type = string

  description = <<EOF
The domain of the cluster. It must NOT contain a trailing period. Some
DNS providers will automatically add this if necessary.

All the records for the cluster are created under this domain.

Note: This field MUST be set manually prior to creating the cluster.
EOF
}

variable "ignition_master" {
  type = string

  default = ""

  description = <<EOF
(internal) Ignition config file contents. This is automatically generated by the installer.
EOF

  sensitive = true
}

variable "ignition_bootstrap" {
  type = string

  default = ""

  description = <<EOF
(internal) Ignition config file contents. This is automatically generated by the installer.
EOF

  sensitive = true
}

variable "ignition_bootstrap_file" {
  type = string

  default = ""

  description = <<EOF
(internal) Path to the Ignition config file contents for the bootstrap host. This is automatically generated by the installer.
EOF
}

// This variable is generated by OpenShift internally. Do not modify
variable "cluster_id" {
  type = string

  description = <<EOF
(internal) The OpenShift cluster ID.

This cluster ID must be no longer than 27 characters and must contain only alphanumeric or hyphen characters.
EOF
}

variable "use_ipv4" {
  type = bool

  description = <<EOF
Whether the cluster should be created with IPv4 networking.
EOF
}

variable "use_ipv6" {
  type = bool

  description = <<EOF
Whether the cluster should be created with IPv6 networking.
EOF
}

@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
  - ibmcloud-approvers
reviewers:
  - ibmcloud-reviewers

@@ -1,23 +0,0 @@
locals {
  description = "Created By OpenShift Installer"
  # If specified, set visibility to 'private' for IBM Terraform Provider
  endpoint_visibility = var.ibmcloud_terraform_private_visibility ? "private" : "public"
  public_endpoints    = var.ibmcloud_publish_strategy == "External" ? true : false
  tags = concat(
    ["kubernetes.io_cluster_${var.cluster_id}:owned"],
    var.ibmcloud_extra_tags
  )
}

############################################
# IBM Cloud provider
############################################

provider "ibm" {
  ibmcloud_api_key = var.ibmcloud_api_key
  region           = var.ibmcloud_region

  # Manage endpoints for IBM Cloud services
  visibility          = local.endpoint_visibility
  endpoints_file_path = var.ibmcloud_endpoints_json_file
}

@@ -1,66 +0,0 @@
locals {
  # Use the direct COS endpoint if IBM Cloud Service Endpoints are being overridden,
  # as public and private may not be available. The direct endpoint requires
  # additional IBM Cloud Account configuration, which must be configured when using
  # Service Endpoint overrides.
  cos_endpoint_type = local.endpoint_visibility == "private" ? "direct" : "public"
}

############################################
# COS bucket
############################################

resource "ibm_cos_bucket" "bootstrap_ignition" {
  bucket_name          = "${local.prefix}-bootstrap-ignition"
  endpoint_type        = local.cos_endpoint_type
  resource_instance_id = var.cos_resource_instance_crn
  region_location      = var.ibmcloud_region
  storage_class        = "smart"
}

############################################
# COS object
############################################

resource "ibm_cos_bucket_object" "bootstrap_ignition" {
  bucket_crn      = ibm_cos_bucket.bootstrap_ignition.crn
  bucket_location = ibm_cos_bucket.bootstrap_ignition.region_location
  content_file    = var.ignition_bootstrap_file
  endpoint_type   = local.cos_endpoint_type
  etag            = filemd5(var.ignition_bootstrap_file)
  key             = "bootstrap.ign"
}
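
# Pinning etag to the local file's MD5 means the object is re-uploaded whenever
# the generated bootstrap Ignition content changes.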

############################################
# IAM service credentials
############################################

# NOTE/TODO: Get IAM token for created Service ID, not supported in provider
data "ibm_iam_auth_token" "iam_token" {}

# NOTE: Not used at the moment
# resource "ibm_iam_service_id" "cos" {
#   name = "${local.prefix}-cos-service-id"
# }

# NOTE: Not used at the moment
# resource "ibm_resource_key" "cos_reader" {
#   name                 = "${local.prefix}-cos-reader"
#   role                 = "Reader"
#   resource_instance_id = ibm_resource_instance.cos.id
#   parameters = {
#     HMAC          = true
#     serviceid_crn = ibm_iam_service_id.cos.crn
#   }
# }

# NOTE: Not used at the moment
# resource "ibm_resource_key" "cos_writer" {
#   name                 = "${local.prefix}-cos-writer"
#   role                 = "Writer"
#   resource_instance_id = ibm_resource_instance.cos.id
#   parameters = {
#     HMAC          = true
#     serviceid_crn = ibm_iam_service_id.cos.crn
#   }
# }

@@ -1,133 +0,0 @@
locals {
  prefix              = var.cluster_id
  port_kubernetes_api = 6443
  port_machine_config = 22623

  # If we need to set up SecurityGroupRules to SSH to bootstrap, for non-public clusters (no Floating IP),
  # combine the Control Plane and Compute subnet CIDRs to create rules for ingress on those CIDRs
  all_subnet_cidrs = local.public_endpoints ? [] : concat(data.ibm_is_subnet.control_plane_subnets[*].ipv4_cidr_block, data.ibm_is_subnet.compute_subnets[*].ipv4_cidr_block)

  # If a boot volume encryption key CRN was supplied, create a list containing that CRN, otherwise an empty list for a dynamic block of boot volumes
  boot_volume_key_crns = var.ibmcloud_control_plane_boot_volume_key == "" ? [] : [var.ibmcloud_control_plane_boot_volume_key]
}

############################################
# Subnet lookup
############################################
data "ibm_is_subnet" "control_plane_subnets" {
  count = local.public_endpoints ? 0 : length(var.control_plane_subnet_id_list)

  identifier = var.control_plane_subnet_id_list[count.index]
}

data "ibm_is_subnet" "compute_subnets" {
  count = local.public_endpoints ? 0 : length(var.compute_subnet_id_list)

  identifier = var.compute_subnet_id_list[count.index]
}

############################################
# Bootstrap node
############################################

resource "ibm_is_instance" "bootstrap_node" {
  name           = "${local.prefix}-bootstrap"
  image          = var.vsi_image_id
  profile        = var.ibmcloud_bootstrap_instance_type
  resource_group = var.resource_group_id
  tags           = local.tags

  primary_network_interface {
    name            = "eth0"
    subnet          = var.control_plane_subnet_id_list[0]
    security_groups = concat(var.control_plane_security_group_id_list, [ibm_is_security_group.bootstrap.id])
  }

  dynamic "boot_volume" {
    for_each = local.boot_volume_key_crns
    content {
      encryption = boot_volume.value
    }
  }

  dedicated_host = length(var.control_plane_dedicated_host_id_list) > 0 ? var.control_plane_dedicated_host_id_list[0] : null

  vpc  = var.vpc_id
  zone = var.control_plane_subnet_zone_list[0]
  keys = []

  # Use custom ignition config that pulls content from COS bucket
  # TODO: Once support for the httpHeaders field is added to
  # terraform-provider-ignition, we should use it instead of this template.
  # https://github.com/community-terraform-providers/terraform-provider-ignition/issues/16
  user_data = templatefile("${path.module}/templates/bootstrap.ign", {
    HOSTNAME    = replace(ibm_cos_bucket.bootstrap_ignition.s3_endpoint_direct, "https://", "")
    BUCKET_NAME = ibm_cos_bucket.bootstrap_ignition.bucket_name
    OBJECT_NAME = ibm_cos_bucket_object.bootstrap_ignition.key
    IAM_TOKEN   = data.ibm_iam_auth_token.iam_token.iam_access_token
  })
}

############################################
# Floating IP
############################################

resource "ibm_is_floating_ip" "bootstrap_floatingip" {
  count = local.public_endpoints ? 1 : 0

  name           = "${local.prefix}-bootstrap-node-ip"
  resource_group = var.resource_group_id
  target         = ibm_is_instance.bootstrap_node.primary_network_interface.0.id
  tags           = local.tags
}

############################################
# Security group
############################################

resource "ibm_is_security_group" "bootstrap" {
  name           = "${local.prefix}-security-group-bootstrap"
  resource_group = var.resource_group_id
  tags           = local.tags
  vpc            = var.vpc_id
}

# SSH
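# Public clusters open port 22 to the world through a single rule; private
# clusters instead get one rule per machine-subnet CIDR collected in
# local.all_subnet_cidrs above.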
resource "ibm_is_security_group_rule" "bootstrap_ssh_inbound" {
  count = local.public_endpoints ? 1 : length(local.all_subnet_cidrs)

  group     = ibm_is_security_group.bootstrap.id
  direction = "inbound"
  remote    = local.public_endpoints ? "0.0.0.0/0" : local.all_subnet_cidrs[count.index]
  tcp {
    port_min = 22
    port_max = 22
  }
}

############################################
# Load balancer backend pool members
############################################

resource "ibm_is_lb_pool_member" "kubernetes_api_public" {
  count = local.public_endpoints ? 1 : 0

  lb             = var.lb_kubernetes_api_public_id
  pool           = var.lb_pool_kubernetes_api_public_id
  port           = local.port_kubernetes_api
  target_address = ibm_is_instance.bootstrap_node.primary_network_interface.0.primary_ipv4_address
}

resource "ibm_is_lb_pool_member" "kubernetes_api_private" {
  lb             = var.lb_kubernetes_api_private_id
  pool           = var.lb_pool_kubernetes_api_private_id
  port           = local.port_kubernetes_api
  target_address = ibm_is_instance.bootstrap_node.primary_network_interface.0.primary_ipv4_address
}

resource "ibm_is_lb_pool_member" "machine_config" {
  lb             = var.lb_kubernetes_api_private_id
  pool           = var.lb_pool_machine_config_id
  port           = local.port_machine_config
  target_address = ibm_is_instance.bootstrap_node.primary_network_interface.0.primary_ipv4_address
}

@@ -1,3 +0,0 @@
output "bootstrap_ip" {
  value = local.public_endpoints ? ibm_is_floating_ip.bootstrap_floatingip[0].address : ibm_is_instance.bootstrap_node.primary_network_interface[0].primary_ipv4_address
}

@@ -1,16 +0,0 @@
{
  "ignition": {
    "version": "3.2.0",
    "config": {
      "replace": {
        "source": "https://${HOSTNAME}/${BUCKET_NAME}/${OBJECT_NAME}",
        "httpHeaders": [
          {
            "name": "Authorization",
            "value": "${IAM_TOKEN}"
          }
        ]
      }
    }
  }
}
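
As a quick sanity check, the template above can be rendered with hypothetical values (illustrative only; the real values come from the COS bucket, bucket object, and IAM token resources wired up in the bootstrap module):

output "example_rendered_bootstrap_ign" {
  value = templatefile("${path.module}/templates/bootstrap.ign", {
    HOSTNAME    = "s3.direct.us-south.cloud-object-storage.appdomain.cloud" # hypothetical direct COS endpoint
    BUCKET_NAME = "example-bootstrap-ignition"                              # hypothetical bucket name
    OBJECT_NAME = "bootstrap.ign"
    IAM_TOKEN   = "Bearer example-token" # hypothetical; a real IAM access token is substituted
  })
  sensitive = true
}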

@@ -1,60 +0,0 @@
#######################################
# Bootstrap module variables
#######################################

variable "control_plane_dedicated_host_id_list" {
  type    = list(string)
  default = []
}

variable "control_plane_security_group_id_list" {
  type = list(string)
}

variable "control_plane_subnet_id_list" {
  type = list(string)
}

variable "compute_subnet_id_list" {
  type = list(string)
}

variable "control_plane_subnet_zone_list" {
  type = list(string)
}

variable "cos_resource_instance_crn" {
  type = string
}

variable "lb_kubernetes_api_public_id" {
  type = string
}

variable "lb_kubernetes_api_private_id" {
  type = string
}

variable "lb_pool_kubernetes_api_public_id" {
  type = string
}

variable "lb_pool_kubernetes_api_private_id" {
  type = string
}

variable "lb_pool_machine_config_id" {
  type = string
}

variable "resource_group_id" {
  type = string
}

variable "vpc_id" {
  type = string
}

variable "vsi_image_id" {
  type = string
}
@@ -1,23 +0,0 @@
|
||||
locals {
|
||||
description = "Created By OpenShift Installer"
|
||||
# If specified, set visibility to 'private' for IBM Terraform Provider
|
||||
endpoint_visibility = var.ibmcloud_terraform_private_visibility ? "private" : "public"
|
||||
public_endpoints = var.ibmcloud_publish_strategy == "External" ? true : false
|
||||
tags = concat(
|
||||
["kubernetes.io_cluster_${var.cluster_id}:owned"],
|
||||
var.ibmcloud_extra_tags
|
||||
)
|
||||
}
|
||||
|
||||
############################################
|
||||
# IBM Cloud provider
|
||||
############################################
|
||||
|
||||
provider "ibm" {
|
||||
ibmcloud_api_key = var.ibmcloud_api_key
|
||||
region = var.ibmcloud_region
|
||||
|
||||
# Manage endpoints for IBM Cloud services
|
||||
visibility = local.endpoint_visibility
|
||||
endpoints_file_path = var.ibmcloud_endpoints_json_file
|
||||
}
|
||||
@@ -1,75 +0,0 @@
|
||||
locals {
|
||||
# If a boot volume encryption key CRN was supplied, create a list containing that CRN, otherwise an empty list for a dynamic block of boot volumes
|
||||
boot_volume_key_crns = var.ibmcloud_control_plane_boot_volume_key == "" ? [] : [var.ibmcloud_control_plane_boot_volume_key]
|
||||
prefix = var.cluster_id
|
||||
port_kubernetes_api = 6443
|
||||
port_machine_config = 22623
|
||||
subnet_count = length(var.control_plane_subnet_id_list)
|
||||
zone_count = length(var.control_plane_subnet_zone_list)
|
||||
}
|
||||
|
||||
############################################
|
||||
# Master nodes
|
||||
############################################
|
||||
|
||||
resource "ibm_is_instance" "master_node" {
|
||||
count = var.master_count
|
||||
|
||||
name = "${local.prefix}-master-${count.index}"
|
||||
image = var.vsi_image_id
|
||||
profile = var.ibmcloud_master_instance_type
|
||||
resource_group = var.resource_group_id
|
||||
tags = local.tags
|
||||
|
||||
primary_network_interface {
|
||||
name = "eth0"
|
||||
subnet = var.control_plane_subnet_id_list[count.index % local.subnet_count]
|
||||
security_groups = var.control_plane_security_group_id_list
|
||||
}
|
||||
|
||||
dynamic "boot_volume" {
|
||||
for_each = local.boot_volume_key_crns
|
||||
content {
|
||||
encryption = boot_volume.value
|
||||
}
|
||||
}
|
||||
|
||||
dedicated_host = length(var.control_plane_dedicated_host_id_list) > 0 ? var.control_plane_dedicated_host_id_list[count.index % local.zone_count] : null
|
||||
|
||||
vpc = var.vpc_id
|
||||
zone = var.control_plane_subnet_zone_list[count.index % local.zone_count]
|
||||
keys = []
|
||||
|
||||
user_data = var.ignition_master
|
||||
}
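
# The dedicated_host expression above places each master on the host created
# for its zone (count.index modulo zone_count) and stays null when no
# dedicated host IDs were supplied.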

############################################
# Load balancer backend pool members
############################################

resource "ibm_is_lb_pool_member" "kubernetes_api_public" {
  count = local.public_endpoints ? var.master_count : 0

  lb             = var.lb_kubernetes_api_public_id
  pool           = var.lb_pool_kubernetes_api_public_id
  port           = local.port_kubernetes_api
  target_address = ibm_is_instance.master_node[count.index].primary_network_interface.0.primary_ipv4_address
}

resource "ibm_is_lb_pool_member" "kubernetes_api_private" {
  count = var.master_count

  lb             = var.lb_kubernetes_api_private_id
  pool           = var.lb_pool_kubernetes_api_private_id
  port           = local.port_kubernetes_api
  target_address = ibm_is_instance.master_node[count.index].primary_network_interface.0.primary_ipv4_address
}

resource "ibm_is_lb_pool_member" "machine_config" {
  count = var.master_count

  lb             = var.lb_kubernetes_api_private_id
  pool           = var.lb_pool_machine_config_id
  port           = local.port_machine_config
  target_address = ibm_is_instance.master_node[count.index].primary_network_interface.0.primary_ipv4_address
}
@@ -1,3 +0,0 @@
output "control_plane_ips" {
  value = ibm_is_instance.master_node[*].primary_network_interface[0].primary_ipv4_address
}
@@ -1,57 +0,0 @@
#######################################
# Master module variables
#######################################

variable "control_plane_dedicated_host_id_list" {
  type    = list(string)
  default = []
}

variable "control_plane_security_group_id_list" {
  type = list(string)
}

variable "control_plane_subnet_id_list" {
  type = list(string)
}

variable "control_plane_subnet_zone_list" {
  type = list(string)
}

variable "cos_resource_instance_crn" {
  type    = string
  default = ""
}

variable "lb_kubernetes_api_public_id" {
  type = string
}

variable "lb_kubernetes_api_private_id" {
  type = string
}

variable "lb_pool_kubernetes_api_public_id" {
  type = string
}

variable "lb_pool_kubernetes_api_private_id" {
  type = string
}

variable "lb_pool_machine_config_id" {
  type = string
}

variable "resource_group_id" {
  type = string
}

variable "vpc_id" {
  type = string
}

variable "vsi_image_id" {
  type = string
}
@@ -1,36 +0,0 @@
############################################
# Datasources
############################################

data "ibm_cis_domain" "base_domain" {
  count = var.is_external ? 1 : 0

  cis_id = var.cis_id
  domain = var.base_domain
}

############################################
# CIS DNS records (CNAME)
############################################

resource "ibm_cis_dns_record" "kubernetes_api" {
  count = var.is_external ? 1 : 0

  cis_id    = var.cis_id
  domain_id = data.ibm_cis_domain.base_domain[0].id
  type      = "CNAME"
  name      = "api.${var.cluster_domain}"
  content   = var.lb_kubernetes_api_public_hostname != "" ? var.lb_kubernetes_api_public_hostname : var.lb_kubernetes_api_private_hostname
  ttl       = 60
}

resource "ibm_cis_dns_record" "kubernetes_api_internal" {
  count = var.is_external ? 1 : 0

  cis_id    = var.cis_id
  domain_id = data.ibm_cis_domain.base_domain[0].id
  type      = "CNAME"
  name      = "api-int.${var.cluster_domain}"
  content   = var.lb_kubernetes_api_private_hostname
  ttl       = 60
}
@@ -1,27 +0,0 @@
############################################
# CIS module variables
############################################

variable "cis_id" {
  type = string
}

variable "base_domain" {
  type = string
}

variable "cluster_domain" {
  type = string
}

variable "is_external" {
  type = bool
}

variable "lb_kubernetes_api_public_hostname" {
  type = string
}

variable "lb_kubernetes_api_private_hostname" {
  type = string
}
@@ -1,23 +0,0 @@
locals {
  description = "Created By OpenShift Installer"
  # If specified, set visibility to 'private' for IBM Terraform Provider
  endpoint_visibility = var.ibmcloud_terraform_private_visibility ? "private" : "public"
  public_endpoints    = var.ibmcloud_publish_strategy == "External" ? true : false
  tags = concat(
    ["kubernetes.io_cluster_${var.cluster_id}:owned"],
    var.ibmcloud_extra_tags
  )
}

############################################
# IBM Cloud provider
############################################

provider "ibm" {
  ibmcloud_api_key = var.ibmcloud_api_key
  region           = var.ibmcloud_region

  # Manage endpoints for IBM Cloud services
  visibility          = local.endpoint_visibility
  endpoints_file_path = var.ibmcloud_endpoints_json_file
}
@@ -1,73 +0,0 @@
locals {
  prefix               = var.cluster_id
  dhosts_master_create = [for dhost in var.dedicated_hosts_master : dhost if lookup(dhost, "id", "") == ""]
  dhosts_master_zones  = [for i, dhost in var.dedicated_hosts_master : var.zones_master[i] if lookup(dhost, "id", "") == ""]
  dhosts_worker_create = [for dhost in var.dedicated_hosts_worker : dhost if lookup(dhost, "id", "") == ""]
  dhosts_worker_zones  = [for i, dhost in var.dedicated_hosts_worker : var.zones_worker[i] if lookup(dhost, "id", "") == ""]
  dhosts_master_merged = [
    for i, dhost in var.dedicated_hosts_master :
    lookup(dhost, "id", "") == ""
    ? ibm_is_dedicated_host.control_plane[index(ibm_is_dedicated_host.control_plane.*.zone, var.zones_master[i])].id
    : dhost.id
  ]
}
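
# Illustrative example of the merged list above (hypothetical values, not from
# the original file): with zones_master = ["us-south-1", "us-south-2"] and
# dedicated_hosts_master = [{ id = "existing-host-id" }, { profile = "bx2d-host-152x608" }],
# dhosts_master_merged becomes ["existing-host-id", <id of the host this module
# creates in us-south-2>].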

############################################
# Dedicated hosts (Control Plane)
############################################

data "ibm_is_dedicated_host_profile" "control_plane" {
  count = length(local.dhosts_master_create)
  name  = local.dhosts_master_create[count.index].profile
}

resource "ibm_is_dedicated_host_group" "control_plane" {
  count = length(local.dhosts_master_create)

  name           = "${local.prefix}-dgroup-control-plane-${local.dhosts_master_zones[count.index]}"
  class          = data.ibm_is_dedicated_host_profile.control_plane[count.index].class
  family         = data.ibm_is_dedicated_host_profile.control_plane[count.index].family
  resource_group = var.resource_group_id
  zone           = local.dhosts_master_zones[count.index]
}

resource "ibm_is_dedicated_host" "control_plane" {
  count = length(local.dhosts_master_create)

  name           = "${local.prefix}-dhost-control-plane-${local.dhosts_master_zones[count.index]}"
  host_group     = ibm_is_dedicated_host_group.control_plane[count.index].id
  profile        = local.dhosts_master_create[count.index].profile
  resource_group = var.resource_group_id

  instance_placement_enabled = true
}

############################################
# Dedicated hosts (Compute)
############################################

data "ibm_is_dedicated_host_profile" "compute" {
  count = length(local.dhosts_worker_create)
  name  = local.dhosts_worker_create[count.index].profile
}

resource "ibm_is_dedicated_host_group" "compute" {
  count = length(local.dhosts_worker_create)

  name           = "${local.prefix}-dgroup-compute-${local.dhosts_worker_zones[count.index]}"
  class          = data.ibm_is_dedicated_host_profile.compute[count.index].class
  family         = data.ibm_is_dedicated_host_profile.compute[count.index].family
  resource_group = var.resource_group_id
  zone           = local.dhosts_worker_zones[count.index]
}

resource "ibm_is_dedicated_host" "compute" {
  count = length(local.dhosts_worker_create)

  name           = "${local.prefix}-dhost-compute-${local.dhosts_worker_zones[count.index]}"
  host_group     = ibm_is_dedicated_host_group.compute[count.index].id
  profile        = local.dhosts_worker_create[count.index].profile
  resource_group = var.resource_group_id

  instance_placement_enabled = true
}
@@ -1,7 +0,0 @@
#######################################
# Dedicated Host module outputs
#######################################

output "control_plane_dedicated_host_id_list" {
  value = local.dhosts_master_merged
}
@@ -1,29 +0,0 @@
#######################################
# Dedicated Host module variables
#######################################

variable "cluster_id" {
  type = string
}

variable "dedicated_hosts_master" {
  type    = list(map(string))
  default = []
}

variable "dedicated_hosts_worker" {
  type    = list(map(string))
  default = []
}

variable "resource_group_id" {
  type = string
}

variable "zones_master" {
  type = list(string)
}

variable "zones_worker" {
  type = list(string)
}
@@ -1,13 +0,0 @@
locals {
  dns_zone_id = var.is_external ? "" : data.ibm_dns_zones.zones[0].dns_zones[index(data.ibm_dns_zones.zones[0].dns_zones[*].name, var.base_domain)].zone_id
}
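
# dns_zone_id looks up the DNS Services zone whose name matches base_domain;
# it is only resolved for Internal clusters (is_external = false), since
# External clusters publish records through CIS instead.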

############################################
# DNS Zone
############################################

data "ibm_dns_zones" "zones" {
  count = var.is_external ? 0 : 1

  instance_id = var.dns_id
}
@@ -1,39 +0,0 @@
############################################
# DNS permitted networks
############################################

resource "ibm_dns_permitted_network" "vpc" {
  # Only create the Permitted Network if Internal (Private using DNS) and the VPC is not already a Permitted Network
  count = ! var.is_external && ! var.vpc_permitted ? 1 : 0

  instance_id = var.dns_id
  zone_id     = local.dns_zone_id
  vpc_crn     = var.vpc_crn
  type        = "vpc"
}

############################################
# DNS records (CNAME)
############################################

resource "ibm_dns_resource_record" "kubernetes_api_internal_public" {
  count = var.is_external ? 0 : 1

  instance_id = var.dns_id
  zone_id     = local.dns_zone_id
  type        = "CNAME"
  name        = "api.${var.cluster_domain}"
  rdata       = var.lb_kubernetes_api_private_hostname
  ttl         = "60"
}

resource "ibm_dns_resource_record" "kubernetes_api_private" {
  count = var.is_external ? 0 : 1

  instance_id = var.dns_id
  zone_id     = local.dns_zone_id
  type        = "CNAME"
  name        = "api-int.${var.cluster_domain}"
  rdata       = var.lb_kubernetes_api_private_hostname
  ttl         = "60"
}
@@ -1,31 +0,0 @@
############################################
# DNS module variables
############################################

variable "dns_id" {
  type = string
}

variable "vpc_crn" {
  type = string
}

variable "vpc_permitted" {
  type = bool
}

variable "base_domain" {
  type = string
}

variable "cluster_domain" {
  type = string
}

variable "is_external" {
  type = bool
}

variable "lb_kubernetes_api_private_hostname" {
  type = string
}
@@ -1,47 +0,0 @@
locals {
  # Use the direct COS endpoint if IBM Cloud Service Endpoints are being overridden,
  # as public and private may not be available. The direct endpoint requires
  # additional IBM Cloud Account configuration, which must be configured when using
  # Service Endpoint overrides.
  cos_endpoint_type = var.endpoint_visibility == "private" ? "direct" : "public"
  prefix            = var.cluster_id
}

resource "ibm_cos_bucket" "images" {
  bucket_name = "${local.prefix}-vsi-image"
  # Use the direct COS endpoint if IBM Cloud Service endpoints are being overridden,
  # as public and private may not be available. Direct requires additional IBM Cloud
  # Account configuration
  endpoint_type        = local.cos_endpoint_type
  resource_instance_id = var.cos_resource_instance_crn
  region_location      = var.region
  storage_class        = "smart"
}

resource "ibm_cos_bucket_object" "file" {
  bucket_crn      = ibm_cos_bucket.images.crn
  bucket_location = ibm_cos_bucket.images.region_location
  content_file    = var.image_filepath
  endpoint_type   = local.cos_endpoint_type
  key             = basename(var.image_filepath)
}

resource "ibm_iam_authorization_policy" "policy" {
  source_service_name         = "is"
  source_resource_type        = "image"
  target_service_name         = "cloud-object-storage"
  target_resource_instance_id = element(split(":", var.cos_resource_instance_crn), 7)
  roles                       = ["Reader"]
}
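
# The element(split(":", ...), 7) expression above extracts the eighth
# colon-separated field of the COS instance CRN, i.e. the service instance
# GUID that the IAM authorization policy needs as its target.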

resource "ibm_is_image" "image" {
  depends_on = [
    ibm_iam_authorization_policy.policy
  ]

  name             = var.name
  href             = "cos://${ibm_cos_bucket.images.region_location}/${ibm_cos_bucket.images.bucket_name}/${ibm_cos_bucket_object.file.key}"
  operating_system = "rhel-coreos-stable-amd64"
  resource_group   = var.resource_group_id
  tags             = var.tags
}
@@ -1,3 +0,0 @@
output "vsi_image_id" {
  value = ibm_is_image.image.id
}
@@ -1,31 +0,0 @@
variable "name" {
  type = string
}

variable "image_filepath" {
  type = string
}

variable "cluster_id" {
  type = string
}

variable "resource_group_id" {
  type = string
}

variable "region" {
  type = string
}

variable "tags" {
  type = list(string)
}

variable "cos_resource_instance_crn" {
  type = string
}

variable "endpoint_visibility" {
  type = string
}
@@ -1,122 +0,0 @@
locals {
  network_resource_group_id = var.ibmcloud_network_resource_group_name == "" ? local.resource_group_id : data.ibm_resource_group.network_group.0.id
  resource_group_id         = var.ibmcloud_resource_group_name == "" ? ibm_resource_group.group.0.id : data.ibm_resource_group.group.0.id
}
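
# The locals above prefer resource groups supplied by name and otherwise fall
# back: network resources default to the cluster resource group, and the
# cluster resource group itself is created when no existing name is given.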

############################################
# Resource groups
############################################

data "ibm_resource_group" "network_group" {
  count = var.ibmcloud_network_resource_group_name == "" ? 0 : 1
  name  = var.ibmcloud_network_resource_group_name
}

resource "ibm_resource_group" "group" {
  count = var.ibmcloud_resource_group_name == "" ? 1 : 0
  name  = var.cluster_id
}

data "ibm_resource_group" "group" {
  count = var.ibmcloud_resource_group_name == "" ? 0 : 1
  name  = var.ibmcloud_resource_group_name
}

############################################
# Shared COS Instance
############################################
resource "ibm_resource_instance" "cos" {
  name              = "${var.cluster_id}-cos"
  service           = "cloud-object-storage"
  plan              = "standard"
  location          = "global"
  resource_group_id = local.resource_group_id
  tags              = local.tags
}

############################################
# Import VPC Custom Image
############################################

module "image" {
  source = "./image"

  name                      = "${var.cluster_id}-rhcos"
  image_filepath            = var.ibmcloud_image_filepath
  cluster_id                = var.cluster_id
  region                    = var.ibmcloud_region
  resource_group_id         = local.resource_group_id
  tags                      = local.tags
  cos_resource_instance_crn = ibm_resource_instance.cos.crn
  endpoint_visibility       = local.endpoint_visibility
}

############################################
# CIS module
############################################

module "cis" {
  source = "./cis"

  cis_id         = var.ibmcloud_cis_crn
  base_domain    = var.base_domain
  cluster_domain = var.cluster_domain
  is_external    = local.public_endpoints

  lb_kubernetes_api_public_hostname  = module.vpc.lb_kubernetes_api_public_hostname
  lb_kubernetes_api_private_hostname = module.vpc.lb_kubernetes_api_private_hostname
}

############################################
# DNS module
############################################

module "dns" {
  source     = "./dns"
  depends_on = [module.vpc]

  dns_id         = var.ibmcloud_dns_id
  vpc_crn        = module.vpc.vpc_crn
  vpc_permitted  = var.ibmcloud_vpc_permitted
  base_domain    = var.base_domain
  cluster_domain = var.cluster_domain
  is_external    = local.public_endpoints

  lb_kubernetes_api_private_hostname = module.vpc.lb_kubernetes_api_private_hostname
}

############################################
# Dedicated Host module
############################################

module "dhost" {
  source = "./dhost"

  cluster_id             = var.cluster_id
  dedicated_hosts_master = var.ibmcloud_master_dedicated_hosts
  dedicated_hosts_worker = var.ibmcloud_worker_dedicated_hosts
  resource_group_id      = local.resource_group_id
  zones_master           = distinct(var.ibmcloud_master_availability_zones)
  zones_worker           = distinct(var.ibmcloud_worker_availability_zones)
}

############################################
# VPC module
############################################

module "vpc" {
  source = "./vpc"

  cluster_id                = var.cluster_id
  network_resource_group_id = local.network_resource_group_id
  public_endpoints          = local.public_endpoints
  resource_group_id         = local.resource_group_id
  tags                      = local.tags
  zones_master              = distinct(var.ibmcloud_master_availability_zones)
  zones_worker              = distinct(var.ibmcloud_worker_availability_zones)

  preexisting_vpc       = var.ibmcloud_preexisting_vpc
  cluster_vpc           = var.ibmcloud_vpc
  control_plane_subnets = var.ibmcloud_control_plane_subnets
  compute_subnets       = var.ibmcloud_compute_subnets
}
@@ -1,59 +0,0 @@
#######################################
# Network module outputs
#######################################

output "control_plane_dedicated_host_id_list" {
  value = module.dhost.control_plane_dedicated_host_id_list
}

output "control_plane_security_group_id_list" {
  value = module.vpc.control_plane_security_group_id_list
}

output "control_plane_subnet_id_list" {
  value = module.vpc.control_plane_subnet_id_list
}

output "control_plane_subnet_zone_list" {
  value = module.vpc.control_plane_subnet_zone_list
}

output "compute_subnet_id_list" {
  value = module.vpc.compute_subnet_id_list
}

output "cos_resource_instance_crn" {
  value = ibm_resource_instance.cos.crn
}

output "lb_kubernetes_api_public_id" {
  value = module.vpc.lb_kubernetes_api_public_id
}

output "lb_kubernetes_api_private_id" {
  value = module.vpc.lb_kubernetes_api_private_id
}

output "lb_pool_kubernetes_api_public_id" {
  value = module.vpc.lb_pool_kubernetes_api_public_id
}

output "lb_pool_kubernetes_api_private_id" {
  value = module.vpc.lb_pool_kubernetes_api_private_id
}

output "lb_pool_machine_config_id" {
  value = module.vpc.lb_pool_machine_config_id
}

output "resource_group_id" {
  value = local.resource_group_id
}

output "vpc_id" {
  value = module.vpc.vpc_id
}

output "vsi_image_id" {
  value = module.image.vsi_image_id
}
@@ -1,36 +0,0 @@
locals {
  # Common locals
  prefix    = var.cluster_id
  zones_all = distinct(concat(var.zones_master, var.zones_worker))

  # VPC locals
  vpc_id  = var.preexisting_vpc ? data.ibm_is_vpc.vpc[0].id : ibm_is_vpc.vpc[0].id
  vpc_crn = var.preexisting_vpc ? data.ibm_is_vpc.vpc[0].crn : ibm_is_vpc.vpc[0].crn

  # LB locals
  port_kubernetes_api   = 6443
  port_machine_config   = 22623
  control_plane_subnets = var.preexisting_vpc ? data.ibm_is_subnet.control_plane[*] : ibm_is_subnet.control_plane[*]
  compute_subnets       = var.preexisting_vpc ? data.ibm_is_subnet.compute[*] : ibm_is_subnet.compute[*]

  # SG locals
  subnet_cidr_blocks = concat(local.control_plane_subnets[*].ipv4_cidr_block, local.compute_subnets[*].ipv4_cidr_block)
}

data "ibm_is_vpc" "vpc" {
  count = var.preexisting_vpc ? 1 : 0

  name = var.cluster_vpc
}

data "ibm_is_subnet" "control_plane" {
  count = var.preexisting_vpc ? length(var.control_plane_subnets) : 0

  name = var.control_plane_subnets[count.index]
}

data "ibm_is_subnet" "compute" {
  count = var.preexisting_vpc ? length(var.compute_subnets) : 0

  name = var.compute_subnets[count.index]
}
@@ -1,59 +0,0 @@
############################################
# Load balancers
############################################

resource "ibm_is_lb" "kubernetes_api_private" {
  name            = "${local.prefix}-kubernetes-api-private"
  resource_group  = var.resource_group_id
  security_groups = [ibm_is_security_group.kubernetes_api_lb.id]
  subnets         = local.control_plane_subnets[*].id
  tags            = var.tags
  type            = "private"
}

############################################
# Load balancer backend pools
############################################

resource "ibm_is_lb_pool" "kubernetes_api_private" {
  name                = "${local.prefix}-kubernetes-api-private"
  lb                  = ibm_is_lb.kubernetes_api_private.id
  algorithm           = "round_robin"
  protocol            = "tcp"
  health_delay        = 60
  health_retries      = 5
  health_timeout      = 30
  health_type         = "https"
  health_monitor_url  = "/readyz"
  health_monitor_port = local.port_kubernetes_api
}

resource "ibm_is_lb_pool" "machine_config" {
  name                = "${local.prefix}-machine-config"
  lb                  = ibm_is_lb.kubernetes_api_private.id
  algorithm           = "round_robin"
  protocol            = "tcp"
  health_delay        = 60
  health_retries      = 5
  health_timeout      = 30
  health_type         = "tcp"
  health_monitor_port = local.port_machine_config
}

############################################
# Load balancer frontend listeners
############################################

resource "ibm_is_lb_listener" "kubernetes_api_private" {
  lb           = ibm_is_lb.kubernetes_api_private.id
  default_pool = ibm_is_lb_pool.kubernetes_api_private.id
  port         = local.port_kubernetes_api
  protocol     = "tcp"
}

resource "ibm_is_lb_listener" "machine_config" {
  lb           = ibm_is_lb.kubernetes_api_private.id
  default_pool = ibm_is_lb_pool.machine_config.id
  port         = local.port_machine_config
  protocol     = "tcp"
}
@@ -1,46 +0,0 @@
############################################
# Load balancers
############################################

resource "ibm_is_lb" "kubernetes_api_public" {
  count = var.public_endpoints ? 1 : 0

  name            = "${local.prefix}-kubernetes-api-public"
  resource_group  = var.resource_group_id
  security_groups = [ibm_is_security_group.kubernetes_api_lb.id]
  subnets         = local.control_plane_subnets[*].id
  tags            = var.tags
  type            = "public"
}

############################################
# Load balancer backend pools
############################################

resource "ibm_is_lb_pool" "kubernetes_api_public" {
  count = var.public_endpoints ? 1 : 0

  name                = "${local.prefix}-kubernetes-api-public"
  lb                  = ibm_is_lb.kubernetes_api_public.0.id
  algorithm           = "round_robin"
  protocol            = "tcp"
  health_delay        = 60
  health_retries      = 5
  health_timeout      = 30
  health_type         = "https"
  health_monitor_url  = "/readyz"
  health_monitor_port = local.port_kubernetes_api
}

############################################
# Load balancer frontend listeners
############################################

resource "ibm_is_lb_listener" "kubernetes_api_public" {
  count = var.public_endpoints ? 1 : 0

  lb           = ibm_is_lb.kubernetes_api_public.0.id
  default_pool = ibm_is_lb_pool.kubernetes_api_public.0.id
  port         = local.port_kubernetes_api
  protocol     = "tcp"
}
@@ -1,69 +0,0 @@
#######################################
# VPC module outputs
#######################################

output "control_plane_security_group_id_list" {
  value = [
    ibm_is_security_group.cluster_wide.id,
    ibm_is_security_group.openshift_network.id,
    ibm_is_security_group.control_plane.id,
    ibm_is_security_group.control_plane_internal.id,
  ]
}

output "control_plane_subnet_id_list" {
  value = local.control_plane_subnets[*].id
}

output "control_plane_subnet_zone_list" {
  value = local.control_plane_subnets[*].zone
}

output "compute_subnet_id_list" {
  value = local.compute_subnets[*].id
}

output "lb_kubernetes_api_public_hostname" {
  value = var.public_endpoints ? ibm_is_lb.kubernetes_api_public.0.hostname : ""
}

output "lb_kubernetes_api_public_id" {
  # Wait for frontend listeners to be ready before use
  depends_on = [
    ibm_is_lb_listener.kubernetes_api_public
  ]
  value = var.public_endpoints ? ibm_is_lb.kubernetes_api_public.0.id : ""
}

output "lb_kubernetes_api_private_hostname" {
  value = ibm_is_lb.kubernetes_api_private.hostname
}

output "lb_kubernetes_api_private_id" {
  # Wait for frontend listeners to be ready before use
  depends_on = [
    ibm_is_lb_listener.kubernetes_api_private,
    ibm_is_lb_listener.machine_config,
  ]
  value = ibm_is_lb.kubernetes_api_private.id
}

output "lb_pool_kubernetes_api_public_id" {
  value = var.public_endpoints ? ibm_is_lb_pool.kubernetes_api_public.0.id : ""
}

output "lb_pool_kubernetes_api_private_id" {
  value = ibm_is_lb_pool.kubernetes_api_private.id
}

output "lb_pool_machine_config_id" {
  value = ibm_is_lb_pool.machine_config.id
}

output "vpc_id" {
  value = local.vpc_id
}

output "vpc_crn" {
  value = local.vpc_crn
}
@@ -1,294 +0,0 @@
# NOTE: Security group rules enforce network access based on OCP requirements
# https://docs.openshift.com/container-platform/4.9/installing/installing_platform_agnostic/installing-platform-agnostic.html#installation-network-connectivity-user-infra_installing-platform-agnostic

# NOTE: Security group limitations
# 5 per network interface (NIC) on a virtual server instance
# 5 remote rules per security group

############################################
# Security group (Cluster-wide)
############################################

resource "ibm_is_security_group" "cluster_wide" {
  name           = "${local.prefix}-sg-cluster-wide"
  resource_group = var.resource_group_id
  tags           = var.tags
  vpc            = local.vpc_id
}

# SSH
resource "ibm_is_security_group_rule" "cluster_wide_ssh_inbound" {
  count = length(local.subnet_cidr_blocks)

  group     = ibm_is_security_group.cluster_wide.id
  direction = "inbound"
  remote    = local.subnet_cidr_blocks[count.index]
  tcp {
    port_min = 22
    port_max = 22
  }
}

# ICMP
resource "ibm_is_security_group_rule" "cluster_wide_icmp_inbound" {
  group     = ibm_is_security_group.cluster_wide.id
  direction = "inbound"
  remote    = ibm_is_security_group.cluster_wide.id
  icmp {}
}

# VXLAN and Geneve - port 4789
resource "ibm_is_security_group_rule" "cluster_wide_vxlan_geneve_4789_inbound" {
  group     = ibm_is_security_group.cluster_wide.id
  direction = "inbound"
  remote    = ibm_is_security_group.cluster_wide.id
  udp {
    port_min = 4789
    port_max = 4789
  }
}

# VXLAN and Geneve - port 6081
resource "ibm_is_security_group_rule" "cluster_wide_vxlan_geneve_6081_inbound" {
  group     = ibm_is_security_group.cluster_wide.id
  direction = "inbound"
  remote    = ibm_is_security_group.cluster_wide.id
  udp {
    port_min = 6081
    port_max = 6081
  }
}

# Outbound
resource "ibm_is_security_group_rule" "cluster_wide_outbound" {
  group     = ibm_is_security_group.cluster_wide.id
  direction = "outbound"
  remote    = "0.0.0.0/0"
}

############################################
# Security group (OpenShift network)
############################################

resource "ibm_is_security_group" "openshift_network" {
  name           = "${local.prefix}-sg-openshift-net"
  resource_group = var.resource_group_id
  tags           = var.tags
  vpc            = local.vpc_id
}

# Host level services - TCP
resource "ibm_is_security_group_rule" "openshift_network_host_services_tcp_inbound" {
  group     = ibm_is_security_group.openshift_network.id
  direction = "inbound"
  remote    = ibm_is_security_group.openshift_network.id
  tcp {
    port_min = 9000
    port_max = 9999
  }
}

# Host level services - UDP
resource "ibm_is_security_group_rule" "openshift_network_host_services_udp_inbound" {
  group     = ibm_is_security_group.openshift_network.id
  direction = "inbound"
  remote    = ibm_is_security_group.openshift_network.id
  udp {
    port_min = 9000
    port_max = 9999
  }
}

# Kubernetes default ports
resource "ibm_is_security_group_rule" "openshift_network_kube_default_ports_inbound" {
  group     = ibm_is_security_group.openshift_network.id
  direction = "inbound"
  remote    = ibm_is_security_group.openshift_network.id
  tcp {
    port_min = 10250
    port_max = 10250
  }
}

# Due to the limitation of only 5 SGs per interface and only 5 remotes per SG
# we stick the IPsec rules here in openshift_network since this SG is added
# to all nodes.
# There is a max of 50 rules per SG, so if we have more subnets this will break.
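
# A rough sanity check (illustrative sketch, not part of the original module):
# this group carries 5 fixed rules plus 2 node-port rules per subnet CIDR, so
# the 50-rule ceiling is only reached at roughly 23 or more subnets.
locals {
  openshift_network_rule_count_estimate = 5 + (2 * length(local.subnet_cidr_blocks))
}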

# IPsec IKE - port 500
resource "ibm_is_security_group_rule" "openshift_network_ipsec_ike_500_inbound" {
  group     = ibm_is_security_group.openshift_network.id
  direction = "inbound"
  remote    = ibm_is_security_group.openshift_network.id
  udp {
    port_min = 500
    port_max = 500
  }
}

# IPsec IKE NAT-T - port 4500
resource "ibm_is_security_group_rule" "openshift_network_ipsec_ike_nat_t_4500_inbound" {
  group     = ibm_is_security_group.openshift_network.id
  direction = "inbound"
  remote    = ibm_is_security_group.openshift_network.id
  udp {
    port_min = 4500
    port_max = 4500
  }
}

# Kubernetes node ports - TCP
# Allows access to node ports from within VPC subnets to accommodate CCM LBs
resource "ibm_is_security_group_rule" "openshift_network_node_ports_tcp_inbound" {
  count = length(local.subnet_cidr_blocks)

  group     = ibm_is_security_group.openshift_network.id
  direction = "inbound"
  remote    = local.subnet_cidr_blocks[count.index]
  tcp {
    port_min = 30000
    port_max = 32767
  }
}

# Kubernetes node ports - UDP
# Allows access to node ports from within VPC subnets to accommodate CCM LBs
resource "ibm_is_security_group_rule" "openshift_network_node_ports_udp_inbound" {
  count = length(local.subnet_cidr_blocks)

  group     = ibm_is_security_group.openshift_network.id
  direction = "inbound"
  remote    = local.subnet_cidr_blocks[count.index]
  udp {
    port_min = 30000
    port_max = 32767
  }
}

############################################
# Security group (Kubernetes API LB)
############################################

resource "ibm_is_security_group" "kubernetes_api_lb" {
  name           = "${local.prefix}-sg-kube-api-lb"
  resource_group = var.resource_group_id
  tags           = var.tags
  vpc            = local.vpc_id
}

# Kubernetes API LB - inbound
resource "ibm_is_security_group_rule" "kubernetes_api_lb_inbound" {
  group     = ibm_is_security_group.kubernetes_api_lb.id
  direction = "inbound"
  remote    = "0.0.0.0/0"
  tcp {
    port_min = 6443
    port_max = 6443
  }
}

# Kubernetes API LB - outbound
resource "ibm_is_security_group_rule" "kubernetes_api_lb_outbound" {
  group     = ibm_is_security_group.kubernetes_api_lb.id
  direction = "outbound"
  remote    = ibm_is_security_group.control_plane.id
  tcp {
    port_min = 6443
    port_max = 6443
  }
}

# Machine config server LB - inbound
resource "ibm_is_security_group_rule" "kubernetes_api_lb_machine_config_inbound" {
  group     = ibm_is_security_group.kubernetes_api_lb.id
  direction = "inbound"
  remote    = ibm_is_security_group.cluster_wide.id
  tcp {
    port_min = 22623
    port_max = 22623
  }
}

# Machine config server LB - outbound
resource "ibm_is_security_group_rule" "kubernetes_api_lb_machine_config_outbound" {
  group     = ibm_is_security_group.kubernetes_api_lb.id
  direction = "outbound"
  remote    = ibm_is_security_group.control_plane.id
  tcp {
    port_min = 22623
    port_max = 22623
  }
}

############################################
# Security group (Control plane)
############################################

resource "ibm_is_security_group" "control_plane" {
  name           = "${local.prefix}-sg-control-plane"
  resource_group = var.resource_group_id
  tags           = var.tags
  vpc            = local.vpc_id
}

resource "ibm_is_security_group" "control_plane_internal" {
  name           = "${local.prefix}-sg-cp-internal"
  resource_group = var.resource_group_id
  tags           = var.tags
  vpc            = local.vpc_id
}

# etcd
resource "ibm_is_security_group_rule" "control_plane_internal_etcd_inbound" {
  group     = ibm_is_security_group.control_plane_internal.id
  direction = "inbound"
  remote    = ibm_is_security_group.control_plane_internal.id
  tcp {
    port_min = 2379
    port_max = 2380
  }
}

# Kubernetes default ports
resource "ibm_is_security_group_rule" "control_plane_internal_kube_default_ports_inbound" {
  group     = ibm_is_security_group.control_plane_internal.id
  direction = "inbound"
  remote    = ibm_is_security_group.cluster_wide.id
  tcp {
    port_min = 10257
    port_max = 10259
  }
}

# Kubernetes API - inbound
resource "ibm_is_security_group_rule" "control_plane_kubernetes_api_inbound" {
  group     = ibm_is_security_group.control_plane.id
  direction = "inbound"
  remote    = ibm_is_security_group.cluster_wide.id
  tcp {
    port_min = 6443
    port_max = 6443
  }
}

# Kubernetes API - inbound via LB
resource "ibm_is_security_group_rule" "control_plane_kubernetes_api_lb_inbound" {
  group     = ibm_is_security_group.control_plane.id
  direction = "inbound"
  remote    = ibm_is_security_group.kubernetes_api_lb.id
  tcp {
    port_min = 6443
    port_max = 6443
  }
}

# Machine config server - inbound via LB
resource "ibm_is_security_group_rule" "control_plane_machine_config_lb_inbound" {
  group     = ibm_is_security_group.control_plane.id
  direction = "inbound"
  remote    = ibm_is_security_group.kubernetes_api_lb.id
  tcp {
    port_min = 22623
    port_max = 22623
  }
}
@@ -1,48 +0,0 @@
#######################################
# VPC module variables
#######################################

variable "cluster_id" {
  type = string
}

variable "network_resource_group_id" {
  type = string
}

variable "public_endpoints" {
  type = bool
}

variable "resource_group_id" {
  type = string
}

variable "tags" {
  type = list(string)
}

variable "zones_master" {
  type = list(string)
}

variable "zones_worker" {
  type = list(string)
}

variable "preexisting_vpc" {
  type    = bool
  default = false
}

variable "cluster_vpc" {
  type = string
}

variable "control_plane_subnets" {
  type = list(string)
}

variable "compute_subnets" {
  type = list(string)
}
@@ -1,52 +0,0 @@
############################################
# VPC
############################################

resource "ibm_is_vpc" "vpc" {
  count          = var.preexisting_vpc ? 0 : 1
  name           = "${local.prefix}-vpc"
  resource_group = var.network_resource_group_id
  tags           = var.tags
}

############################################
# Public gateways
############################################

resource "ibm_is_public_gateway" "public_gateway" {
  count = var.preexisting_vpc ? 0 : length(local.zones_all)

  name           = "${local.prefix}-public-gateway-${local.zones_all[count.index]}"
  resource_group = var.network_resource_group_id
  tags           = var.tags
  vpc            = ibm_is_vpc.vpc[0].id
  zone           = local.zones_all[count.index]
}

############################################
# Subnets
############################################

resource "ibm_is_subnet" "control_plane" {
  count = var.preexisting_vpc ? 0 : length(var.zones_master)

  name                     = "${local.prefix}-subnet-control-plane-${var.zones_master[count.index]}"
  resource_group           = var.network_resource_group_id
  tags                     = var.tags
  vpc                      = ibm_is_vpc.vpc[0].id
  zone                     = var.zones_master[count.index]
  public_gateway           = ibm_is_public_gateway.public_gateway[index(ibm_is_public_gateway.public_gateway.*.zone, var.zones_master[count.index])].id
  total_ipv4_address_count = "256"
}
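
# Each subnet attaches the public gateway created for its zone by matching on
# the gateway list's zone attribute (the index() lookup above).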

resource "ibm_is_subnet" "compute" {
  count = var.preexisting_vpc ? 0 : length(var.zones_worker)

  name                     = "${local.prefix}-subnet-compute-${var.zones_worker[count.index]}"
  resource_group           = var.network_resource_group_id
  tags                     = var.tags
  vpc                      = ibm_is_vpc.vpc[0].id
  zone                     = var.zones_worker[count.index]
  public_gateway           = ibm_is_public_gateway.public_gateway[index(ibm_is_public_gateway.public_gateway.*.zone, var.zones_worker[count.index])].id
  total_ipv4_address_count = "256"
}
@@ -1,155 +0,0 @@
#######################################
# Top-level module variables (required)
#######################################

variable "ibmcloud_api_key" {
  type = string
  # TODO: Supported on tf 0.14
  # sensitive = true
  description = "The IAM API key for authenticating with IBM Cloud APIs."
}

variable "ibmcloud_bootstrap_instance_type" {
  type        = string
  description = "Instance type for the bootstrap node. Example: `bx2-4x16`"
}

variable "ibmcloud_cis_crn" {
  type        = string
  description = "The CRN of CIS instance to use."
  default     = ""
}

variable "ibmcloud_dns_id" {
  type        = string
  description = "The ID of DNS Service instance to use."
  default     = ""
}

variable "ibmcloud_region" {
  type        = string
  description = "The target IBM Cloud region for the cluster."
}

variable "ibmcloud_master_instance_type" {
  type        = string
  description = "Instance type for the master node(s). Example: `bx2-4x16`"
}

variable "ibmcloud_master_availability_zones" {
  type        = list(string)
  description = "The availability zones in which to create the masters. The length of this list must match master_count."
}

variable "ibmcloud_worker_availability_zones" {
  type        = list(string)
  description = "The availability zones to provision for workers. Worker instances are created by the machine-API operator, but this variable controls their supporting infrastructure (subnets, routing, dedicated hosts, etc.)."
}

variable "ibmcloud_image_filepath" {
  type        = string
  description = "The file path to the RHCOS image"
}

variable "ibmcloud_terraform_private_visibility" {
  type        = bool
description = "Specified whether the IBM Cloud terraform provider visibility mode should be private, for endpoint usage."
  default     = false
}

#######################################
# Top-level module variables (optional)
#######################################

variable "ibmcloud_endpoints_json_file" {
  type        = string
  description = "JSON file containing IBM Cloud service endpoints"
  default     = ""
}

variable "ibmcloud_preexisting_vpc" {
  type        = bool
  description = "Specifies whether an existing VPC should be used or a new one created for installation."
  default     = false
}

variable "ibmcloud_vpc_permitted" {
  type        = bool
  description = "Specifies whether an existing VPC is already a Permitted Network for DNS Instance, for Private clusters."
  default     = false
}

variable "ibmcloud_vpc" {
  type        = string
  description = "The name of an existing cluster VPC."
  default     = null
}

variable "ibmcloud_control_plane_boot_volume_key" {
  type        = string
  description = "IBM Cloud Key Protect key CRN to use to encrypt the control plane's volume(s)."
  default     = null
}

variable "ibmcloud_control_plane_subnets" {
  type        = list(string)
  description = "The names of the existing subnets for the control plane."
  default     = []
}

variable "ibmcloud_compute_subnets" {
  type        = list(string)
  description = "The names of the existing subnets for the compute plane."
  default     = []
}

variable "ibmcloud_master_dedicated_hosts" {
  type        = list(map(string))
  description = "(optional) The list of dedicated hosts in which to create the control plane nodes."
  default     = []
}

variable "ibmcloud_worker_dedicated_hosts" {
  type        = list(map(string))
  description = "(optional) The list of dedicated hosts in which to create the compute nodes."
  default     = []
}

variable "ibmcloud_extra_tags" {
  type        = list(string)
  description = <<EOF
(optional) Extra IBM Cloud tags to be applied to created resources.
Example: `[ "key:value", "foo:bar" ]`
EOF
  default     = []
}

variable "ibmcloud_publish_strategy" {
  type        = string
  description = "The cluster publishing strategy, either Internal or External"
  default     = "External"
  # TODO: Supported on tf 0.13
  # validation {
  #   condition     = contains(["External", "Internal"], var.ibmcloud_publish_strategy)
  #   error_message = "The ibmcloud_publish_strategy value must be \"External\" or \"Internal\"."
  # }
}

variable "ibmcloud_network_resource_group_name" {
  type        = string
  description = <<EOF
(optional) The name of the resource group for existing cluster network resources. If this is set, the existing network resources
(VPC, Subnets, etc.) must exist in the resource group to be used for cluster creation. Otherwise, new network resources are
created in the same resource group as the other cluster resources (see 'ibmcloud_resource_group_name').
EOF
  default     = ""
}

variable "ibmcloud_resource_group_name" {
  type        = string
  description = <<EOF
(optional) The name of the resource group for the cluster. If this is set, the cluster is installed to that existing resource group;
otherwise a new resource group will be created using the cluster ID.
EOF
  default     = ""
}
@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
  - libvirt-approvers
reviewers:
  - libvirt-reviewers
@@ -1,49 +0,0 @@
# Bootstrap Module

This [Terraform][] [module][] manages [libvirt][] resources only needed during cluster bootstrapping.
It uses [implicit provider inheritance][implicit-provider-inheritance] to access the [libvirt provider][libvirt-provider].

## Example

Set up a `main.tf` with:

```hcl
provider "libvirt" {
  uri = "qemu:///system"
}

resource "libvirt_network" "example" {
  name      = "example"
  mode      = "none"
  domain    = "example.com"
  addresses = ["192.168.0.0/24"]
}

resource "libvirt_volume" "example" {
  name   = "example"
  source = "file:///path/to/example.qcow2"
}

module "bootstrap" {
  source = "github.com/openshift/installer//data/data/libvirt/bootstrap"

  addresses      = ["192.168.0.1"]
  base_volume_id = "${libvirt_volume.example.id}"
  cluster_id     = "my-cluster"
  ignition       = "{\"ignition\": {\"version\": \"3.1.0\"}}",
  network_id     = "${libvirt_network.example.id}"
}
```

Then run:

```console
$ terraform init
$ terraform plan
```
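
Assuming the plan looks as expected (the resource names above are placeholders for your environment), applying it is the usual final step:

```console
$ terraform apply
```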

[libvirt]: https://libvirt.org/
[libvirt-provider]: https://github.com/dmacvicar/terraform-provider-libvirt
[implicit-provider-inheritance]: https://www.terraform.io/docs/modules/usage.html#implicit-provider-inheritance
[module]: https://www.terraform.io/docs/modules/
[Terraform]: https://www.terraform.io/
@@ -1,52 +0,0 @@
provider "libvirt" {
  uri = var.libvirt_uri
}

resource "libvirt_volume" "bootstrap" {
  name           = "${var.cluster_id}-bootstrap"
  base_volume_id = var.base_volume_id
  pool           = var.pool
  # Bump this so it works for OKD too
  size = "34359738368"
}

resource "libvirt_ignition" "bootstrap" {
  name    = "${var.cluster_id}-bootstrap.ign"
  content = var.ignition_bootstrap
  pool    = var.pool
}

resource "libvirt_domain" "bootstrap" {
  name = "${var.cluster_id}-bootstrap"

  memory = var.libvirt_bootstrap_memory

  vcpu = "2"

  coreos_ignition = libvirt_ignition.bootstrap.id

  disk {
    volume_id = libvirt_volume.bootstrap.id
  }

  console {
    type        = "pty"
    target_port = 0
  }

  cpu {
    mode = "host-passthrough"
  }

  network_interface {
    network_id = var.network_id
    hostname   = "${var.cluster_id}-bootstrap.${var.cluster_domain}"
    addresses  = [var.libvirt_bootstrap_ip]
  }

  graphics {
    type        = "vnc"
    listen_type = "address"
  }
}

@@ -1,3 +0,0 @@
output "bootstrap_ip" {
  value = var.libvirt_bootstrap_ip
}
@@ -1,14 +0,0 @@
variable "base_volume_id" {
  type        = string
  description = "The ID of the base volume for the bootstrap node."
}

variable "network_id" {
  type        = string
  description = "The ID of a network resource containing the bootstrap node's addresses."
}

variable "pool" {
  type        = string
  description = "The name of the storage pool."
}
@@ -1,137 +0,0 @@
provider "libvirt" {
  uri = var.libvirt_uri
}

resource "libvirt_pool" "storage_pool" {
  name = var.cluster_id
  type = "dir"
  path = "/var/lib/libvirt/openshift-images/${var.cluster_id}"
}

resource "libvirt_volume" "coreos_base" {
  name   = "${var.cluster_id}-base"
  source = var.os_image
  pool   = libvirt_pool.storage_pool.name
}

resource "libvirt_volume" "master" {
  count          = var.master_count
  name           = "${var.cluster_id}-master-${count.index}"
  base_volume_id = libvirt_volume.coreos_base.id
  pool           = libvirt_pool.storage_pool.name
  size           = var.libvirt_master_size
}

resource "libvirt_ignition" "master" {
  name    = "${var.cluster_id}-master.ign"
  content = var.ignition_master
  pool    = libvirt_pool.storage_pool.name
}

resource "libvirt_network" "net" {
  name = var.cluster_id

  mode   = "nat"
  bridge = var.libvirt_network_if

  domain = var.cluster_domain

  addresses = var.machine_v4_cidrs

  dns {
    local_only = true

    dynamic "hosts" {
      for_each = concat(
        data.libvirt_network_dns_host_template.bootstrap.*.rendered,
        data.libvirt_network_dns_host_template.bootstrap_int.*.rendered,
        data.libvirt_network_dns_host_template.masters.*.rendered,
        data.libvirt_network_dns_host_template.masters_int.*.rendered,
      )
      content {
        hostname = hosts.value.hostname
        ip       = hosts.value.ip
      }
    }
  }

  dnsmasq_options {
    dynamic "options" {
      for_each = concat(
        data.libvirt_network_dnsmasq_options_template.options.*.rendered,
      )
      content {
        option_name  = options.value.option_name
        option_value = options.value.option_value
      }
    }
  }

  autostart = true
}
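
# With local_only = true, the network's dnsmasq answers api/api-int lookups
# for the cluster domain itself, using the bootstrap and master host templates
# rendered below.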
|
||||
|
||||
resource "libvirt_domain" "master" {
|
||||
count = var.master_count
|
||||
|
||||
name = "${var.cluster_id}-master-${count.index}"
|
||||
|
||||
memory = var.libvirt_master_memory
|
||||
vcpu = var.libvirt_master_vcpu
|
||||
|
||||
coreos_ignition = libvirt_ignition.master.id
|
||||
|
||||
disk {
|
||||
volume_id = element(libvirt_volume.master.*.id, count.index)
|
||||
}
|
||||
|
||||
console {
|
||||
type = "pty"
|
||||
target_port = 0
|
||||
}
|
||||
|
||||
cpu {
|
||||
mode = "host-passthrough"
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network_id = libvirt_network.net.id
|
||||
hostname = "${var.cluster_id}-master-${count.index}.${var.cluster_domain}"
|
||||
addresses = [var.libvirt_master_ips[count.index]]
|
||||
}
|
||||
|
||||
graphics {
|
||||
type = "vnc"
|
||||
listen_type = "address"
|
||||
}
|
||||
}
|
||||
|
||||
data "libvirt_network_dns_host_template" "bootstrap" {
|
||||
count = var.bootstrap_dns ? 1 : 0
|
||||
ip = var.libvirt_bootstrap_ip
|
||||
hostname = "api.${var.cluster_domain}"
|
||||
}
|
||||
|
||||
data "libvirt_network_dns_host_template" "masters" {
|
||||
count = var.master_count
|
||||
ip = var.libvirt_master_ips[count.index]
|
||||
hostname = "api.${var.cluster_domain}"
|
||||
}
|
||||
|
||||
data "libvirt_network_dns_host_template" "bootstrap_int" {
|
||||
count = var.bootstrap_dns ? 1 : 0
|
||||
ip = var.libvirt_bootstrap_ip
|
||||
hostname = "api-int.${var.cluster_domain}"
|
||||
}
|
||||
|
||||
data "libvirt_network_dns_host_template" "masters_int" {
|
||||
count = var.master_count
|
||||
ip = var.libvirt_master_ips[count.index]
|
||||
hostname = "api-int.${var.cluster_domain}"
|
||||
}
|
||||
|
||||
data "libvirt_network_dnsmasq_options_template" "options" {
|
||||
count = length(var.libvirt_dnsmasq_options)
|
||||
option_name = var.libvirt_dnsmasq_options[count.index]["option_name"]
|
||||
option_value = var.libvirt_dnsmasq_options[count.index]["option_value"]
|
||||
}
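
# The dnsmasq options template above is a straight pass-through: each map in
# libvirt_dnsmasq_options becomes one dnsmasq option on the cluster network.
# A hedged, hypothetical illustration (this value is not part of the original
# config): injecting a wildcard *.apps record pointing at an assumed ingress
# VIP, in terraform.tfvars form.

libvirt_dnsmasq_options = [
  {
    option_name  = "address"
    option_value = "/.apps.mycluster.example.com/192.168.126.51"
  },
]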
@@ -1,15 +0,0 @@
output "pool" {
  value = libvirt_pool.storage_pool.name
}

output "base_volume_id" {
  value = libvirt_volume.coreos_base.id
}

output "network_id" {
  value = libvirt_network.net.id
}

output "control_plane_ips" {
  value = var.libvirt_master_ips
}
@@ -1,67 +0,0 @@
variable "bootstrap_dns" {
  default     = true
  description = "Whether to include DNS entries for the bootstrap node or not."
}

variable "libvirt_uri" {
  type        = string
  description = "libvirt connection URI"
}

variable "libvirt_network_if" {
  type        = string
  description = "The name of the bridge to use"
}

variable "os_image" {
  type        = string
  description = "The URL of the OS disk image"
}

variable "libvirt_bootstrap_ip" {
  type        = string
  description = "The desired bootstrap IP."
}

variable "libvirt_master_ips" {
  type        = list(string)
  description = "The list of desired master IPs; its length must match master_count."
}

# It is recommended to bump this if you can.
variable "libvirt_master_memory" {
  type        = string
  description = "RAM in MiB allocated to masters"
  default     = "16384"
}

# At some point this one is likely to default to the number
# of physical cores you have. See also
# https://pagure.io/standard-test-roles/pull-request/223
variable "libvirt_master_vcpu" {
  type        = string
  description = "CPUs allocated to masters"
  default     = "2"
}

variable "libvirt_bootstrap_memory" {
  type        = number
  description = "RAM in MiB allocated to the bootstrap node"
  default     = 8192
}

# Currently RHCOS maintains its default 16G size; if that
# changes, we need to change it here as well.
# https://github.com/coreos/coreos-assembler/pull/924
variable "libvirt_master_size" {
  type        = string
  description = "Size of the volume in bytes"
  default     = "17179869184"
}

variable "libvirt_dnsmasq_options" {
  type        = list(map(string))
  description = "A list of dnsmasq options to be applied to the libvirt network"
  default     = []
}
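
# A hedged, hypothetical terraform.tfvars sketch for the sizing knobs above
# (the values are illustrative, not installer defaults beyond what the
# variables already declare). Note the size arithmetic: the 17179869184-byte
# default is exactly 16 GiB, i.e. 16 * 1024 * 1024 * 1024.

libvirt_master_memory = "24576"       # 24 GiB of RAM per master, in MiB
libvirt_master_vcpu   = "4"
libvirt_master_size   = "34359738368" # 32 GiB root volume: 32 * 1024^3 bytes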

@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
  - nutanix-approvers
reviewers:
  - nutanix-reviewers
@@ -1,74 +0,0 @@
locals {
  description = "Created By OpenShift Installer"
}

provider "nutanix" {
  wait_timeout = 60
  username     = var.nutanix_username
  password     = var.nutanix_password
  endpoint     = var.nutanix_prism_central_address
  port         = var.nutanix_prism_central_port
}

resource "nutanix_image" "bootstrap_ignition" {
  name        = var.nutanix_bootstrap_ignition_image
  source_path = var.nutanix_bootstrap_ignition_image_filepath
  description = local.description

  categories {
    name  = var.ocp_category_key_id
    value = var.ocp_category_value_owned_id
  }
}

resource "nutanix_virtual_machine" "vm_bootstrap" {
  name                 = "${var.cluster_id}-bootstrap"
  description          = local.description
  cluster_uuid         = var.nutanix_prism_element_uuids[0]
  num_vcpus_per_socket = 4
  num_sockets          = 1
  memory_size_mib      = 16384
  boot_device_order_list = [
    "DISK",
    "CDROM",
    "NETWORK"
  ]

  disk_list {
    device_properties {
      device_type = "DISK"
      disk_address = {
        device_index = 0
        adapter_type = "SCSI"
      }
    }
    data_source_reference = {
      kind = "image"
      uuid = var.image_id
    }
    disk_size_mib = var.nutanix_control_plane_disk_mib
  }

  disk_list {
    device_properties {
      device_type = "CDROM"
      disk_address = {
        adapter_type = "IDE"
        device_index = 0
      }
    }
    data_source_reference = {
      kind = "image"
      uuid = nutanix_image.bootstrap_ignition.id
    }
  }

  categories {
    name  = var.ocp_category_key_id
    value = var.ocp_category_value_owned_id
  }

  nic_list {
    subnet_uuid = var.nutanix_subnet_uuids[0]
  }
}
@@ -1,3 +0,0 @@
output "bootstrap_ip" {
  value = nutanix_virtual_machine.vm_bootstrap.nic_list_status.0.ip_endpoint_list.0.ip
}
@@ -1,11 +0,0 @@
variable "image_id" {
  type = string
}

variable "ocp_category_key_id" {
  type = string
}

variable "ocp_category_value_owned_id" {
  type = string
}
@@ -1,112 +0,0 @@
locals {
  description = "Created By OpenShift Installer"
}

provider "nutanix" {
  wait_timeout = 60
  username     = var.nutanix_username
  password     = var.nutanix_password
  endpoint     = var.nutanix_prism_central_address
  port         = var.nutanix_prism_central_port
}

resource "nutanix_category_key" "ocp_category_key" {
  name        = "kubernetes-io-cluster-${var.cluster_id}"
  description = "OpenShift Cluster Category Key"
}

resource "nutanix_category_value" "ocp_category_value_owned" {
  name        = nutanix_category_key.ocp_category_key.id
  value       = "owned"
  description = "OpenShift Cluster Category Value: resources owned by the cluster"
}

resource "nutanix_category_value" "ocp_category_value_shared" {
  name        = nutanix_category_key.ocp_category_key.id
  value       = "shared"
  description = "OpenShift Cluster Category Value: resources used but not owned by the cluster"
}

resource "nutanix_image" "rhcos" {
  name        = var.nutanix_image
  source_uri  = var.nutanix_image_uri
  description = local.description

  categories {
    name  = nutanix_category_key.ocp_category_key.name
    value = nutanix_category_value.ocp_category_value_owned.value
  }
}

data "ignition_file" "hostname" {
  count = var.master_count
  mode  = "420" // 0644
  path  = "/etc/hostname"

  content {
    content = <<EOF
${var.cluster_id}-master-${count.index}
EOF
  }
}
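
# The ignition provider takes the file mode as a decimal string, which is why
# 0644 appears as "420" above. A quick sanity check of the conversion, as a
# hedged illustration (the local below is hypothetical and unused elsewhere):
#
#   0644 octal = 6*8^2 + 4*8 + 4 = 384 + 32 + 4 = 420 decimal

locals {
  mode_0644_decimal = 6 * 64 + 4 * 8 + 4 # = 420
}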

data "ignition_config" "master_ignition_config" {
  count = var.master_count

  merge {
    source = "data:text/plain;charset=utf-8;base64,${base64encode(var.ignition_master)}"
  }

  files = [
    element(data.ignition_file.hostname.*.rendered, count.index)
  ]
}

resource "nutanix_virtual_machine" "vm_master" {
  count                = var.master_count
  description          = local.description
  name                 = "${var.cluster_id}-master-${count.index}"
  cluster_uuid         = var.nutanix_prism_element_uuids[count.index]
  num_vcpus_per_socket = var.nutanix_control_plane_cores_per_socket
  num_sockets          = var.nutanix_control_plane_num_cpus
  memory_size_mib      = var.nutanix_control_plane_memory_mib
  boot_device_order_list = [
    "DISK",
    "CDROM",
    "NETWORK"
  ]
  disk_list {
    device_properties {
      device_type = "DISK"
      disk_address = {
        device_index = 0
        adapter_type = "SCSI"
      }
    }
    data_source_reference = {
      kind = "image"
      uuid = nutanix_image.rhcos.id
    }
    disk_size_mib = var.nutanix_control_plane_disk_mib
  }

  categories {
    name  = nutanix_category_key.ocp_category_key.name
    value = nutanix_category_value.ocp_category_value_owned.value
  }

  dynamic "categories" {
    for_each = (var.nutanix_control_plane_categories == null) ? {} : var.nutanix_control_plane_categories
    content {
      name  = categories.key
      value = categories.value
    }
  }

  project_reference = (length(var.nutanix_control_plane_project_uuid) != 0) ? { kind = "project", uuid = var.nutanix_control_plane_project_uuid } : null

  guest_customization_cloud_init_user_data = base64encode(element(data.ignition_config.master_ignition_config.*.rendered, count.index))
  nic_list {
    subnet_uuid = var.nutanix_subnet_uuids[count.index]
  }
}
@@ -1,19 +0,0 @@
output "image_id" {
  value = nutanix_image.rhcos.id
}

output "ocp_category_key_id" {
  value = nutanix_category_key.ocp_category_key.id
}

output "ocp_category_value_owned_id" {
  value = nutanix_category_value.ocp_category_value_owned.id
}

output "ocp_category_value_shared_id" {
  value = nutanix_category_value.ocp_category_value_shared.id
}

output "control_plane_ips" {
  value = nutanix_virtual_machine.vm_master[*].nic_list_status[0].ip_endpoint_list[0].ip
}
@@ -1,93 +0,0 @@
//////
// Nutanix variables
//////

variable "nutanix_prism_central_address" {
  type        = string
  description = "Address to connect to Prism Central."
}

variable "nutanix_prism_central_port" {
  type        = string
  description = "Port to connect to Prism Central."
}

variable "nutanix_username" {
  type        = string
  description = "Prism Central user for the environment."
}

variable "nutanix_password" {
  type        = string
  description = "Prism Central user password."
}

variable "nutanix_prism_element_uuids" {
  type        = list(string)
  default     = []
  description = "The UUIDs of the Prism Element clusters."
}

variable "nutanix_image_uri" {
  type        = string
  description = "The URI of the image file that will be imported into Prism Central."
}

variable "nutanix_image" {
  type        = string
  description = "The name of the image that will be imported into Prism Central."
}

variable "nutanix_subnet_uuids" {
  type        = list(string)
  default     = []
  description = "The UUIDs of the publicly accessible subnets for cluster ingress and access."
}

variable "nutanix_bootstrap_ignition_image" {
  type        = string
  description = "Name of the image containing the bootstrap ignition files"
}

variable "nutanix_bootstrap_ignition_image_filepath" {
  type        = string
  description = "Path to the image containing the bootstrap ignition files"
}

///////////
// Control Plane machine variables
///////////

variable "nutanix_control_plane_memory_mib" {
  type = number
}

variable "nutanix_control_plane_disk_mib" {
  type = number
}

variable "nutanix_control_plane_num_cpus" {
  type = number
}

variable "nutanix_control_plane_cores_per_socket" {
  type = number
}

variable "nutanix_control_plane_project_uuid" {
  type        = string
  default     = null
  description = "(optional) An existing prism-central project to be applied to control-plane vms."
}

variable "nutanix_control_plane_categories" {
  type = map(string)

  description = <<EOF
(optional) The existing prism-central categories to be applied to control-plane vms.

Example: `{ "key" = "value", "foo" = "bar" }`
EOF

  default = {}
}
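
# A hedged, hypothetical terraform.tfvars sketch for the optional control-plane
# settings above; every value below is illustrative, not taken from a real
# environment.

nutanix_control_plane_project_uuid = "0005a2f1-0000-0000-0000-abcdefabcdef"
nutanix_control_plane_categories = {
  "Environment" = "dev"
  "CostCenter"  = "1234"
}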

@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
  - openstack-approvers
reviewers:
  - openstack-reviewers
@@ -1,129 +0,0 @@
locals {
  description = "Created By OpenShift Installer"
}

provider "openstack" {
  auth_url            = var.openstack_credentials_auth_url
  cert                = var.openstack_credentials_cert
  cloud               = var.openstack_credentials_cloud
  domain_id           = var.openstack_credentials_domain_id
  domain_name         = var.openstack_credentials_domain_name
  endpoint_type       = var.openstack_credentials_endpoint_type
  insecure            = var.openstack_credentials_insecure
  key                 = var.openstack_credentials_key
  password            = var.openstack_credentials_password
  project_domain_id   = var.openstack_credentials_project_domain_id
  project_domain_name = var.openstack_credentials_project_domain_name
  region              = var.openstack_credentials_region
  swauth              = var.openstack_credentials_swauth
  tenant_id           = var.openstack_credentials_tenant_id
  tenant_name         = var.openstack_credentials_tenant_name
  token               = var.openstack_credentials_token
  use_octavia         = var.openstack_credentials_use_octavia
  user_domain_id      = var.openstack_credentials_user_domain_id
  user_domain_name    = var.openstack_credentials_user_domain_name
  user_id             = var.openstack_credentials_user_id
  user_name           = var.openstack_credentials_user_name
}

data "openstack_images_image_v2" "base_image" {
  name = var.openstack_base_image_name
}

data "openstack_compute_flavor_v2" "bootstrap_flavor" {
  name = var.openstack_master_flavor_name
}

resource "openstack_networking_port_v2" "bootstrap_port" {
  name        = "${var.cluster_id}-bootstrap-port"
  description = local.description

  admin_state_up     = "true"
  network_id         = var.private_network_id
  security_group_ids = var.master_sg_ids
  tags               = ["openshiftClusterID=${var.cluster_id}"]

  extra_dhcp_option {
    name  = "domain-search"
    value = var.cluster_domain
  }

  dynamic "fixed_ip" {
    for_each = var.nodes_default_port.fixed_ips

    content {
      subnet_id  = fixed_ip.value["subnet_id"]
      ip_address = fixed_ip.value["ip_address"]
    }
  }

  dynamic "allowed_address_pairs" {
    for_each = var.openstack_user_managed_load_balancer ? [] : var.openstack_api_int_ips
    content {
      ip_address = allowed_address_pairs.value
    }
  }

  depends_on = [var.master_port_ids]
}

resource "openstack_blockstorage_volume_v3" "bootstrap_volume" {
  name        = "${var.cluster_id}-bootstrap"
  count       = var.openstack_master_root_volume_size == null ? 0 : 1
  description = local.description

  size        = var.openstack_master_root_volume_size
  volume_type = var.openstack_master_root_volume_types[0]
  image_id    = data.openstack_images_image_v2.base_image.id

  availability_zone = var.openstack_master_root_volume_availability_zones[0]
}

resource "openstack_compute_instance_v2" "bootstrap" {
  name              = "${var.cluster_id}-bootstrap"
  flavor_id         = data.openstack_compute_flavor_v2.bootstrap_flavor.id
  image_id          = var.openstack_master_root_volume_size == null ? data.openstack_images_image_v2.base_image.id : null
  availability_zone = var.openstack_master_availability_zones[0]

  user_data = var.openstack_bootstrap_shim_ignition

  dynamic "block_device" {
    for_each = var.openstack_master_root_volume_size == null ? [] : [openstack_blockstorage_volume_v3.bootstrap_volume[0].id]
    content {
      uuid                  = block_device.value
      source_type           = "volume"
      boot_index            = 0
      destination_type      = "volume"
      delete_on_termination = true
    }
  }

  network {
    port = openstack_networking_port_v2.bootstrap_port.id
  }

  dynamic "network" {
    for_each = var.openstack_additional_network_ids

    content {
      uuid = network.value
    }
  }

  tags = ["openshiftClusterID=${var.cluster_id}"]

  metadata = {
    Name               = "${var.cluster_id}-bootstrap"
    openshiftClusterID = var.cluster_id
  }
}

resource "openstack_networking_floatingip_v2" "bootstrap_fip" {
  count       = var.openstack_external_network != "" ? 1 : 0
  description = "${var.cluster_id}-bootstrap-fip"
  pool        = var.openstack_external_network
  port_id     = openstack_networking_port_v2.bootstrap_port.id
  tags        = ["openshiftClusterID=${var.cluster_id}"]

  depends_on = [openstack_compute_instance_v2.bootstrap]
}
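
# A hedged, hypothetical terraform.tfvars sketch of the boot-source switch
# above: leaving openstack_master_root_volume_size null boots straight from
# the Glance image, while setting it creates a Cinder root volume from that
# image and boots from the volume instead. All values below are illustrative.

openstack_master_root_volume_size               = 100 # GiB
openstack_master_root_volume_types              = ["tripleo", "tripleo", "tripleo"]
openstack_master_root_volume_availability_zones = ["nova", "nova", "nova"]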
@@ -1,4 +0,0 @@
output "bootstrap_ip" {
  value = var.openstack_external_network != "" ? openstack_networking_floatingip_v2.bootstrap_fip[0].address : openstack_compute_instance_v2.bootstrap.access_ip_v4
}

@@ -1,22 +0,0 @@
variable "master_sg_ids" {
  type        = list(string)
  description = "The security group IDs to be applied to the master nodes."
}

variable "private_network_id" {
  type = string
}

variable "nodes_default_port" {
  type = object({
    network_id = string
    fixed_ips = list(object({
      subnet_id  = string
      ip_address = string
    }))
  })
}

variable "master_port_ids" {
  type = list(string)
}
@@ -1,11 +0,0 @@
locals {
  master_port_ids = coalescelist(
    openstack_networking_trunk_v2.masters.*.port_id,
    openstack_networking_port_v2.masters.*.id,
  )
  master_sg_ids = concat(
    var.openstack_master_extra_sg_ids,
    [openstack_networking_secgroup_v2.master.id],
  )
}
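
# coalescelist() returns the first of its list arguments that is non-empty, so
# master_port_ids resolves to the trunk ports when trunks were created and
# falls back to the plain Neutron ports otherwise. A hedged toy illustration
# (hypothetical local, not part of the original file):

locals {
  coalesce_demo = coalescelist([], ["port-a", "port-b"]) # => ["port-a", "port-b"]
}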

@@ -1,329 +0,0 @@
locals {
  description = "Created By OpenShift Installer"
}

provider "openstack" {
  auth_url            = var.openstack_credentials_auth_url
  cert                = var.openstack_credentials_cert
  cloud               = var.openstack_credentials_cloud
  domain_id           = var.openstack_credentials_domain_id
  domain_name         = var.openstack_credentials_domain_name
  endpoint_type       = var.openstack_credentials_endpoint_type
  insecure            = var.openstack_credentials_insecure
  key                 = var.openstack_credentials_key
  password            = var.openstack_credentials_password
  project_domain_id   = var.openstack_credentials_project_domain_id
  project_domain_name = var.openstack_credentials_project_domain_name
  region              = var.openstack_credentials_region
  swauth              = var.openstack_credentials_swauth
  tenant_id           = var.openstack_credentials_tenant_id
  tenant_name         = var.openstack_credentials_tenant_name
  token               = var.openstack_credentials_token
  use_octavia         = var.openstack_credentials_use_octavia
  user_domain_id      = var.openstack_credentials_user_domain_id
  user_domain_name    = var.openstack_credentials_user_domain_name
  user_id             = var.openstack_credentials_user_id
  user_name           = var.openstack_credentials_user_name
}

data "openstack_images_image_v2" "base_image" {
  name = var.openstack_base_image_name
}

data "openstack_compute_flavor_v2" "masters_flavor" {
  name = var.openstack_master_flavor_name
}

data "ignition_file" "hostname" {
  count = var.master_count
  mode  = "420" // 0644
  path  = "/etc/hostname"

  content {
    content = <<EOF
${var.cluster_id}-master-${count.index}
EOF
  }
}

data "ignition_config" "master_ignition_config" {
  count = var.master_count

  merge {
    source = "data:text/plain;charset=utf-8;base64,${base64encode(var.ignition_master)}"
  }

  files = [
    element(data.ignition_file.hostname.*.rendered, count.index)
  ]
}

resource "openstack_blockstorage_volume_v3" "master_volume" {
  name        = "${var.cluster_id}-master-${count.index}"
  description = local.description
  count       = var.openstack_master_root_volume_size == null ? 0 : var.master_count

  size        = var.openstack_master_root_volume_size
  volume_type = var.openstack_master_root_volume_types[count.index]
  image_id    = data.openstack_images_image_v2.base_image.id

  availability_zone = var.openstack_master_root_volume_availability_zones[count.index]
}

resource "openstack_compute_servergroup_v2" "master_group" {
  name     = var.openstack_master_server_group_name
  policies = [var.openstack_master_server_group_policy]
}

# The master servers are created in three separate resource definition blocks,
# rather than a single block with a "count" meta-property, because they need to
# be created sequentially rather than concurrently by Terraform.
#
# The reason why they need to be created one at a time is that OpenStack's
# Compute module is currently unable to honour the "soft-anti-affinity" policy
# when the servers are created concurrently.
#
# We chose to unroll the loop into three instances, because three is the
# minimum number of required Control plane nodes, as stated in the
# documentation[1].
#
# The expectation is that machine-api-operator will take care of creating any
# other requested nodes as soon as the deployment is effective, and that a
# similar workaround is applied for day-2 operations.
#
# [1]: https://github.com/openshift/installer/tree/master/docs/user/openstack#master-nodes
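#
# For contrast, a hedged sketch (NOT the installer's code) of what the
# rolled-up form would look like; with count, Terraform creates the servers
# concurrently, which is exactly what the unrolling above works around:
#
#   resource "openstack_compute_instance_v2" "master" {
#     count             = var.master_count
#     name              = "${var.cluster_id}-master-${count.index}"
#     flavor_id         = data.openstack_compute_flavor_v2.masters_flavor.id
#     availability_zone = var.openstack_master_availability_zones[count.index]
#     user_data         = data.ignition_config.master_ignition_config[count.index].rendered
#     network {
#       port = local.master_port_ids[count.index]
#     }
#     scheduler_hints {
#       group = openstack_compute_servergroup_v2.master_group.id
#     }
#   }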
resource "openstack_compute_instance_v2" "master_conf_0" {
  count = var.master_count > 0 ? 1 : 0
  name  = "${var.cluster_id}-master-0"

  flavor_id         = data.openstack_compute_flavor_v2.masters_flavor.id
  image_id          = var.openstack_master_root_volume_size == null ? data.openstack_images_image_v2.base_image.id : null
  security_groups   = local.master_sg_ids
  availability_zone = var.openstack_master_availability_zones[0]
  user_data = element(
    data.ignition_config.master_ignition_config.*.rendered,
    0,
  )

  dynamic "block_device" {
    for_each = var.openstack_master_root_volume_size == null ? [] : [openstack_blockstorage_volume_v3.master_volume[0].id]
    content {
      uuid                  = block_device.value
      source_type           = "volume"
      boot_index            = 0
      destination_type      = "volume"
      delete_on_termination = true
    }
  }

  network {
    port = local.master_port_ids[0]
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.master_group.id
  }

  dynamic "network" {
    for_each = [for port in openstack_networking_port_v2.master_0_failuredomain : port.id]

    content {
      port = network.value
    }
  }

  dynamic "network" {
    for_each = var.openstack_additional_network_ids

    content {
      uuid = network.value
    }
  }

  tags = ["openshiftClusterID=${var.cluster_id}"]

  metadata = {
    Name               = "${var.cluster_id}-master"
    openshiftClusterID = var.cluster_id
  }
}

resource "openstack_compute_instance_v2" "master_conf_1" {
  count = var.master_count > 1 ? 1 : 0
  name  = "${var.cluster_id}-master-1"

  flavor_id         = data.openstack_compute_flavor_v2.masters_flavor.id
  image_id          = var.openstack_master_root_volume_size == null ? data.openstack_images_image_v2.base_image.id : null
  security_groups   = local.master_sg_ids
  availability_zone = var.openstack_master_availability_zones[1]
  user_data = element(
    data.ignition_config.master_ignition_config.*.rendered,
    1,
  )

  dynamic "block_device" {
    for_each = var.openstack_master_root_volume_size == null ? [] : [openstack_blockstorage_volume_v3.master_volume[1].id]
    content {
      uuid                  = block_device.value
      source_type           = "volume"
      boot_index            = 0
      destination_type      = "volume"
      delete_on_termination = true
    }
  }

  network {
    port = local.master_port_ids[1]
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.master_group.id
  }

  dynamic "network" {
    for_each = [for port in openstack_networking_port_v2.master_1_failuredomain : port.id]

    content {
      port = network.value
    }
  }

  dynamic "network" {
    for_each = var.openstack_additional_network_ids

    content {
      uuid = network.value
    }
  }

  tags = ["openshiftClusterID=${var.cluster_id}"]

  metadata = {
    Name               = "${var.cluster_id}-master"
    openshiftClusterID = var.cluster_id
  }

  depends_on = [openstack_compute_instance_v2.master_conf_0]
}

resource "openstack_compute_instance_v2" "master_conf_2" {
  count = var.master_count > 2 ? 1 : 0
  name  = "${var.cluster_id}-master-2"

  flavor_id         = data.openstack_compute_flavor_v2.masters_flavor.id
  image_id          = var.openstack_master_root_volume_size == null ? data.openstack_images_image_v2.base_image.id : null
  security_groups   = local.master_sg_ids
  availability_zone = var.openstack_master_availability_zones[2]
  user_data = element(
    data.ignition_config.master_ignition_config.*.rendered,
    2,
  )

  dynamic "block_device" {
    for_each = var.openstack_master_root_volume_size == null ? [] : [openstack_blockstorage_volume_v3.master_volume[2].id]
    content {
      uuid                  = block_device.value
      source_type           = "volume"
      boot_index            = 0
      destination_type      = "volume"
      delete_on_termination = true
    }
  }

  network {
    port = local.master_port_ids[2]
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.master_group.id
  }

  dynamic "network" {
    for_each = [for port in openstack_networking_port_v2.master_2_failuredomain : port.id]

    content {
      port = network.value
    }
  }

  dynamic "network" {
    for_each = var.openstack_additional_network_ids

    content {
      uuid = network.value
    }
  }

  tags = ["openshiftClusterID=${var.cluster_id}"]

  metadata = {
    Name               = "${var.cluster_id}-master"
    openshiftClusterID = var.cluster_id
  }

  depends_on = [openstack_compute_instance_v2.master_conf_1]
}

# Pre-create server groups for the Compute MachineSets, with the given policy.
resource "openstack_compute_servergroup_v2" "server_groups" {
  for_each = var.openstack_worker_server_group_names
  name     = each.key
  policies = [var.openstack_worker_server_group_policy]
}

resource "openstack_networking_port_v2" "master_0_failuredomain" {
  count = var.master_count > 0 ? length(var.openstack_additional_ports[0]) : 0

  name               = "${var.cluster_id}-master-0-${count.index}"
  description        = local.description
  network_id         = var.openstack_additional_ports[0][count.index].network_id
  security_group_ids = concat(var.openstack_master_extra_sg_ids, [openstack_networking_secgroup_v2.master.id])
  tags               = ["openshiftClusterID=${var.cluster_id}"]

  dynamic "fixed_ip" {
    for_each = var.openstack_additional_ports[0][count.index].fixed_ips

    content {
      subnet_id  = fixed_ip.value["subnet_id"]
      ip_address = fixed_ip.value["ip_address"]
    }
  }
}

resource "openstack_networking_port_v2" "master_1_failuredomain" {
  count = var.master_count > 1 ? length(var.openstack_additional_ports[1]) : 0

  name               = "${var.cluster_id}-master-1-${count.index}"
  description        = local.description
  network_id         = var.openstack_additional_ports[1][count.index].network_id
  security_group_ids = concat(var.openstack_master_extra_sg_ids, [openstack_networking_secgroup_v2.master.id])
  tags               = ["openshiftClusterID=${var.cluster_id}"]

  dynamic "fixed_ip" {
    for_each = var.openstack_additional_ports[1][count.index].fixed_ips

    content {
      subnet_id  = fixed_ip.value["subnet_id"]
      ip_address = fixed_ip.value["ip_address"]
    }
  }
}

resource "openstack_networking_port_v2" "master_2_failuredomain" {
  count = var.master_count > 2 ? length(var.openstack_additional_ports[2]) : 0

  name               = "${var.cluster_id}-master-2-${count.index}"
  description        = local.description
  network_id         = var.openstack_additional_ports[2][count.index].network_id
  security_group_ids = concat(var.openstack_master_extra_sg_ids, [openstack_networking_secgroup_v2.master.id])
  tags               = ["openshiftClusterID=${var.cluster_id}"]

  dynamic "fixed_ip" {
    for_each = var.openstack_additional_ports[2][count.index].fixed_ips

    content {
      subnet_id  = fixed_ip.value["subnet_id"]
      ip_address = fixed_ip.value["ip_address"]
    }
  }
}
@@ -1,26 +0,0 @@
output "control_plane_ips" {
  value = concat(
    openstack_compute_instance_v2.master_conf_0.*.access_ip_v4,
    openstack_compute_instance_v2.master_conf_1.*.access_ip_v4,
    openstack_compute_instance_v2.master_conf_2.*.access_ip_v4,
  )
}

output "master_sg_ids" {
  value = concat(
    var.openstack_master_extra_sg_ids,
    [openstack_networking_secgroup_v2.master.id],
  )
}

output "master_port_ids" {
  value = local.master_port_ids
}

output "private_network_id" {
  value = local.nodes_default_port.network_id
}

output "nodes_default_port" {
  value = local.nodes_default_port
}
@@ -1,206 +0,0 @@
locals {
  # Create subnet for the first MachineNetwork CIDR if we need to
  nodes_cidr_block = var.machine_v4_cidrs[0]
  nodes_default_port = var.openstack_default_machines_port != null ? var.openstack_default_machines_port : {
    network_id = openstack_networking_network_v2.openshift-private[0].id,
    fixed_ips  = [{ subnet_id = openstack_networking_subnet_v2.nodes[0].id, ip_address = "" }],
  }
  nodes_ports   = [for port in var.openstack_machines_ports : port != null ? port : local.nodes_default_port]
  create_router = (var.openstack_external_network != "" && var.openstack_default_machines_port == null) ? 1 : 0
}

data "openstack_networking_network_v2" "external_network" {
  count      = var.openstack_external_network != "" ? 1 : 0
  name       = var.openstack_external_network
  network_id = var.openstack_external_network_id
  external   = true
}

resource "openstack_networking_network_v2" "openshift-private" {
  count          = var.openstack_default_machines_port == null ? 1 : 0
  name           = "${var.cluster_id}-openshift"
  admin_state_up = "true"
  description    = local.description
  tags           = ["openshiftClusterID=${var.cluster_id}", "${var.cluster_id}-primaryClusterNetwork"]
}

resource "openstack_networking_subnet_v2" "nodes" {
  count           = var.openstack_default_machines_port == null ? 1 : 0
  name            = "${var.cluster_id}-nodes"
  description     = local.description
  cidr            = local.nodes_cidr_block
  ip_version      = 4
  network_id      = openstack_networking_network_v2.openshift-private[0].id
  tags            = ["openshiftClusterID=${var.cluster_id}"]
  dns_nameservers = var.openstack_external_dns

  # We reserve some space at the beginning of the CIDR to use for the VIPs
  # FIXME(mandre) if we let the ports pick up VIPs automatically, we don't have
  # to do any of this.
  allocation_pool {
    start = cidrhost(local.nodes_cidr_block, 10)
    end   = cidrhost(local.nodes_cidr_block, pow(2, (32 - split("/", local.nodes_cidr_block)[1])) - 2)
  }
}
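
locals {
  # A hedged worked example of the allocation_pool arithmetic above, for a
  # hypothetical machine CIDR of 10.0.0.0/16 (not a value from this config):
  #   cidrhost("10.0.0.0/16", 10)    = "10.0.0.10"    -- pool start; .1-.9 stay free for VIPs
  #   pow(2, 32 - 16) - 2            = 65534          -- last usable host index
  #   cidrhost("10.0.0.0/16", 65534) = "10.0.255.254" -- pool end, excluding broadcast
  example_pool_start = cidrhost("10.0.0.0/16", 10)
  example_pool_end   = cidrhost("10.0.0.0/16", pow(2, 32 - 16) - 2)
}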

resource "openstack_networking_port_v2" "masters" {
  name        = "${var.cluster_id}-master-${count.index}"
  count       = var.master_count
  description = local.description

  admin_state_up = "true"
  network_id     = local.nodes_ports[count.index].network_id
  security_group_ids = concat(
    var.openstack_master_extra_sg_ids,
    [openstack_networking_secgroup_v2.master.id],
  )
  tags = ["openshiftClusterID=${var.cluster_id}"]

  extra_dhcp_option {
    name  = "domain-search"
    value = var.cluster_domain
  }

  dynamic "fixed_ip" {
    for_each = local.nodes_ports[count.index].fixed_ips

    content {
      subnet_id  = fixed_ip.value["subnet_id"]
      ip_address = fixed_ip.value["ip_address"]
    }
  }

  dynamic "allowed_address_pairs" {
    for_each = var.openstack_user_managed_load_balancer ? [] : var.openstack_api_int_ips
    content {
      ip_address = allowed_address_pairs.value
    }
  }

  dynamic "allowed_address_pairs" {
    for_each = var.openstack_user_managed_load_balancer ? [] : var.openstack_ingress_ips
    content {
      ip_address = allowed_address_pairs.value
    }
  }

  depends_on = [openstack_networking_port_v2.api_port, openstack_networking_port_v2.ingress_port,
  data.openstack_networking_port_ids_v2.api_ports, data.openstack_networking_port_ids_v2.ingress_ports]
}

# Port needs to be created by the user when using dual-stack since SLAAC or Stateless
# does not allow specification of fixed-ips during Port creation.
data "openstack_networking_port_ids_v2" "api_ports" {
  fixed_ip   = var.openstack_api_int_ips[0]
  network_id = local.nodes_default_port.network_id
}

# Port needs to be created by the user when using dual-stack since SLAAC or Stateless
# does not allow specification of fixed-ips during Port creation.
data "openstack_networking_port_ids_v2" "ingress_ports" {
  fixed_ip   = var.openstack_ingress_ips[0]
  network_id = local.nodes_default_port.network_id
}
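
# A hedged sketch of the sort of pre-created API port the comments above refer
# to (NOT part of this config; the resource name and subnet/network variables
# are hypothetical). In dual-stack SLAAC/stateless setups the user creates the
# port out of band, and the data sources above merely look it up by fixed IP:
#
#   resource "openstack_networking_port_v2" "user_api_port" {
#     name       = "mycluster-api-port"
#     network_id = var.machines_network_id
#     fixed_ip {
#       subnet_id  = var.machines_v6_subnet_id
#       ip_address = var.openstack_api_int_ips[0]
#     }
#   }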

resource "openstack_networking_port_secgroup_associate_v2" "api_port_sg" {
  count              = (! var.openstack_user_managed_load_balancer && var.use_ipv6) ? 1 : 0
  port_id            = data.openstack_networking_port_ids_v2.api_ports.ids[0]
  security_group_ids = [openstack_networking_secgroup_v2.master.id]
  depends_on         = [data.openstack_networking_port_ids_v2.api_ports]
}

resource "openstack_networking_port_secgroup_associate_v2" "ingress_port_sg" {
  count              = (! var.openstack_user_managed_load_balancer && var.use_ipv6) ? 1 : 0
  port_id            = data.openstack_networking_port_ids_v2.ingress_ports.ids[0]
  security_group_ids = [openstack_networking_secgroup_v2.worker.id]
  depends_on         = [data.openstack_networking_port_ids_v2.ingress_ports]
}

resource "openstack_networking_port_v2" "api_port" {
  count       = var.openstack_user_managed_load_balancer || var.use_ipv6 ? 0 : 1
  name        = "${var.cluster_id}-api-port"
  description = local.description

  admin_state_up     = "true"
  network_id         = local.nodes_default_port.network_id
  security_group_ids = [openstack_networking_secgroup_v2.master.id]
  tags               = ["openshiftClusterID=${var.cluster_id}"]

  dynamic "fixed_ip" {
    for_each = local.nodes_default_port.fixed_ips

    content {
      subnet_id  = fixed_ip.value["subnet_id"]
      ip_address = var.openstack_api_int_ips[0]
    }
  }
}

resource "openstack_networking_port_v2" "ingress_port" {
  count       = var.openstack_user_managed_load_balancer || var.use_ipv6 ? 0 : 1
  name        = "${var.cluster_id}-ingress-port"
  description = local.description

  admin_state_up     = "true"
  network_id         = local.nodes_default_port.network_id
  security_group_ids = [openstack_networking_secgroup_v2.worker.id]
  tags               = ["openshiftClusterID=${var.cluster_id}"]

  dynamic "fixed_ip" {
    for_each = local.nodes_default_port.fixed_ips

    content {
      subnet_id  = fixed_ip.value["subnet_id"]
      ip_address = var.openstack_ingress_ips[0]
    }
  }
}

// If external network is defined, assign the floating IP to one of the masters.
//
// Strictly speaking, this is not required to finish the installation. We
// support environments without floating IPs. However, since the installer
// is running outside of the nodes subnet (often outside of the OpenStack
// cluster itself), it needs a floating IP to monitor the progress.
//
// This IP address is not expected to be the final solution for providing HA.
// It is only here to let the installer finish without any errors. Configuring
// a load balancer and providing external connectivity is a post-installation
// step that can't always be automated (we need to support OpenStack clusters
// that do not have or do not want to use Octavia).
//
// If an external network has not been defined, then a floating IP
// will not be provided or assigned to the masters.
//
// If the floating IP is not provided, the installer will time out waiting for
// bootstrapping to complete, but the OpenShift cluster itself should come up
// as expected.

resource "openstack_networking_floatingip_associate_v2" "api_fip" {
  count       = (var.openstack_user_managed_load_balancer || length(var.openstack_api_floating_ip) == 0) ? 0 : 1
  port_id     = var.use_ipv6 ? data.openstack_networking_port_ids_v2.api_ports.ids[0] : openstack_networking_port_v2.api_port[0].id
  floating_ip = var.openstack_api_floating_ip
  depends_on  = [openstack_networking_router_interface_v2.nodes_router_interface]
}

resource "openstack_networking_floatingip_associate_v2" "ingress_fip" {
  count       = (var.openstack_user_managed_load_balancer || length(var.openstack_ingress_floating_ip) == 0) ? 0 : 1
  port_id     = var.use_ipv6 ? data.openstack_networking_port_ids_v2.ingress_ports.ids[0] : openstack_networking_port_v2.ingress_port[0].id
  floating_ip = var.openstack_ingress_floating_ip
  depends_on  = [openstack_networking_router_interface_v2.nodes_router_interface]
}

resource "openstack_networking_router_v2" "openshift-external-router" {
  count               = local.create_router
  description         = local.description
  name                = "${var.cluster_id}-external-router"
  admin_state_up      = true
  external_network_id = join("", data.openstack_networking_network_v2.external_network.*.id)
  tags                = ["openshiftClusterID=${var.cluster_id}"]
}

resource "openstack_networking_router_interface_v2" "nodes_router_interface" {
  count     = local.create_router
  router_id = join("", openstack_networking_router_v2.openshift-external-router.*.id)
  subnet_id = openstack_networking_subnet_v2.nodes[0].id
}
@@ -1,568 +0,0 @@
resource "openstack_networking_secgroup_v2" "master" {
  name        = "${var.cluster_id}-master"
  tags        = ["openshiftClusterID=${var.cluster_id}"]
  description = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_mcs" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22623
  port_range_max    = 22623
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_mcs_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 22623
  port_range_max    = 22623
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}
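
# Port 22623 is the Machine Config Server, which serves Ignition to new nodes,
# so it is deliberately reachable only from the machine CIDRs. The same
# per-CIDR count pattern repeats for every rule below. As a hedged sketch only
# (hypothetical names, NOT a refactor present in this config), the TCP rules
# could be collapsed with for_each over a rule map crossed with the CIDRs:
#
#   locals {
#     master_tcp_rules = {
#       mcs  = { min = 22623, max = 22623 }
#       ssh  = { min = 22, max = 22 }
#       etcd = { min = 2379, max = 2380 }
#     }
#     master_tcp_rule_pairs = {
#       for pair in setproduct(keys(local.master_tcp_rules), var.machine_v4_cidrs) :
#       "${pair[0]}-${pair[1]}" => { rule = local.master_tcp_rules[pair[0]], cidr = pair[1] }
#     }
#   }
#
#   resource "openstack_networking_secgroup_rule_v2" "master_tcp" {
#     for_each          = local.master_tcp_rule_pairs
#     direction         = "ingress"
#     ethertype         = "IPv4"
#     protocol          = "tcp"
#     port_range_min    = each.value.rule.min
#     port_range_max    = each.value.rule.max
#     remote_ip_prefix  = each.value.cidr
#     security_group_id = openstack_networking_secgroup_v2.master.id
#   }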

# TODO(mandre) Explicitly enable egress

resource "openstack_networking_secgroup_rule_v2" "master_ingress_icmp" {
  direction      = "ingress"
  ethertype      = "IPv4"
  protocol       = "icmp"
  port_range_min = 0
  port_range_max = 0
  # FIXME(mandre) AWS only allows ICMP from cidr_block
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_icmp_v6" {
  count          = length(var.machine_v6_cidrs)
  direction      = "ingress"
  ethertype      = "IPv6"
  protocol       = "ipv6-icmp"
  port_range_min = 0
  port_range_max = 0
  # FIXME(mandre) AWS only allows ICMP from cidr_block
  remote_ip_prefix  = "::/0"
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_ssh" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_ssh_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_dns_tcp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 53
  port_range_max    = 53
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_dns_tcp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 53
  port_range_max    = 53
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_dns_udp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 53
  port_range_max    = 53
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_dns_udp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 53
  port_range_max    = 53
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_api" {
  direction      = "ingress"
  ethertype      = "IPv4"
  protocol       = "tcp"
  port_range_min = 6443
  port_range_max = 6443
  # FIXME(mandre) AWS only allows API port from cidr_block
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_api_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 6443
  port_range_max    = 6443
  remote_ip_prefix  = "::/0"
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_vxlan" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 4789
  port_range_max    = 4789
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_vxlan_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 4789
  port_range_max    = 4789
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_geneve" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 6081
  port_range_max    = 6081
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_geneve_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 6081
  port_range_max    = 6081
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_ike" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 500
  port_range_max    = 500
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_ike_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 500
  port_range_max    = 500
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_ike_nat_t" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 4500
  port_range_max    = 4500
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_esp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "esp"
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_esp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "esp"
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_ovndb" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 6641
  port_range_max    = 6642
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_ovndb_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 6641
  port_range_max    = 6642
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 9000
  port_range_max    = 9999
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 9000
  port_range_max    = 9999
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal_udp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 9000
  port_range_max    = 9999
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal_udp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 9000
  port_range_max    = 9999
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_scheduler" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 10259
  port_range_max    = 10259
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_scheduler_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 10259
  port_range_max    = 10259
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_controller_manager" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 10257
  port_range_max    = 10257
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_controller_manager_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 10257
  port_range_max    = 10257
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_kubelet_secure" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 10250
  port_range_max    = 10250
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_kubelet_secure_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 10250
  port_range_max    = 10250
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_etcd" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 2379
  port_range_max    = 2380
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_etcd_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 2379
  port_range_max    = 2380
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_tcp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 30000
  port_range_max    = 32767
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_tcp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 30000
  port_range_max    = 32767
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_udp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 30000
  port_range_max    = 32767
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_udp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 30000
  port_range_max    = 32767
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_vrrp" {
  count     = length(var.machine_v4_cidrs)
  direction = "ingress"
  ethertype = "IPv4"
  # Explicitly set the vrrp protocol number to prevent cases when the Neutron Plugin
  # is disabled and it cannot identify a number by name.
  protocol          = "112"
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_vrrp_v6" {
  count     = length(var.machine_v6_cidrs)
  direction = "ingress"
  ethertype = "IPv6"
  # Explicitly set the vrrp protocol number to prevent cases when the Neutron Plugin
  # is disabled and it cannot identify a number by name.
  protocol          = "112"
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "master_ingress_http" {
  count             = var.masters_schedulable ? 1 : 0
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_https" {
|
||||
count = var.masters_schedulable ? 1 : 0
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 443
|
||||
port_range_max = 443
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
description = local.description
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_http_v6" {
|
||||
count = (var.masters_schedulable && length(var.machine_v6_cidrs) > 0) ? 1 : 0
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 80
|
||||
port_range_max = 80
|
||||
remote_ip_prefix = "::/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
description = local.description
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_https_v6" {
|
||||
count = (var.masters_schedulable && length(var.machine_v6_cidrs) > 0) ? 1 : 0
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 443
|
||||
port_range_max = 443
|
||||
remote_ip_prefix = "::/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
description = local.description
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_router" {
|
||||
count = var.masters_schedulable ? length(var.machine_v4_cidrs) : 0
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 1936
|
||||
port_range_max = 1936
|
||||
remote_ip_prefix = element(var.machine_v4_cidrs, count.index)
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
description = local.description
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_router_v6" {
|
||||
count = (var.masters_schedulable && length(var.machine_v6_cidrs) > 0) ? 1 : 0
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 1936
|
||||
port_range_max = 1936
|
||||
remote_ip_prefix = element(var.machine_v6_cidrs, count.index)
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
description = local.description
|
||||
}
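
All of these per-CIDR rules follow a single pattern: count iterates over the machine CIDR list and element() selects the CIDR for each instance. On Terraform 0.12.6 or newer the same rule could be keyed by the CIDR itself with for_each; a minimal sketch (not part of the removed files):

resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_scheduler" {
  for_each = toset(var.machine_v4_cidrs)

  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 10259
  port_range_max    = 10259
  remote_ip_prefix  = each.value # the CIDR itself is the instance key
  security_group_id = openstack_networking_secgroup_v2.master.id
  description       = local.description
}

Keying by value keeps existing rules stable when a CIDR is inserted mid-list, whereas count/element re-creates every rule after the insertion point.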

@@ -1,374 +0,0 @@
resource "openstack_networking_secgroup_v2" "worker" {
  name        = "${var.cluster_id}-worker"
  tags        = ["openshiftClusterID=${var.cluster_id}"]
  description = local.description
}

# TODO(mandre) Explicitly enable egress

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_icmp" {
  direction      = "ingress"
  ethertype      = "IPv4"
  protocol       = "icmp"
  port_range_min = 0
  port_range_max = 0
  # FIXME(mandre) AWS only allows ICMP from cidr_block
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_icmp_v6" {
  count          = length(var.machine_v6_cidrs)
  direction      = "ingress"
  ethertype      = "IPv6"
  protocol       = "ipv6-icmp"
  port_range_min = 0
  port_range_max = 0
  # FIXME(mandre) AWS only allows ICMP from cidr_block
  remote_ip_prefix  = "::/0"
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_ssh" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_ssh_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_http" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_http_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "::/0"
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_https" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_https_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
  remote_ip_prefix  = "::/0"
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_router" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 1936
  port_range_max    = 1936
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_router_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 1936
  port_range_max    = 1936
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vxlan" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 4789
  port_range_max    = 4789
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vxlan_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 4789
  port_range_max    = 4789
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_geneve" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 6081
  port_range_max    = 6081
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_geneve_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 6081
  port_range_max    = 6081
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_ike" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 500
  port_range_max    = 500
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_ike_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 500
  port_range_max    = 500
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_ike_nat_t" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 4500
  port_range_max    = 4500
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_esp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "esp"
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_esp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "esp"
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 9000
  port_range_max    = 9999
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 9000
  port_range_max    = 9999
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal_udp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 9000
  port_range_max    = 9999
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal_udp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 9000
  port_range_max    = 9999
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_kubelet_insecure" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 10250
  port_range_max    = 10250
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_kubelet_insecure_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 10250
  port_range_max    = 10250
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_tcp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 30000
  port_range_max    = 32767
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_tcp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 30000
  port_range_max    = 32767
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_udp" {
  count             = length(var.machine_v4_cidrs)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "udp"
  port_range_min    = 30000
  port_range_max    = 32767
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_udp_v6" {
  count             = length(var.machine_v6_cidrs)
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "udp"
  port_range_min    = 30000
  port_range_max    = 32767
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vrrp" {
  count     = length(var.machine_v4_cidrs)
  direction = "ingress"
  ethertype = "IPv4"
  # Explicitly set the VRRP protocol number to cover the case where the Neutron
  # plugin is disabled and cannot resolve the number from the protocol name.
  protocol          = "112"
  remote_ip_prefix  = element(var.machine_v4_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vrrp_v6" {
  count     = length(var.machine_v6_cidrs)
  direction = "ingress"
  ethertype = "IPv6"
  # Explicitly set the VRRP protocol number to cover the case where the Neutron
  # plugin is disabled and cannot resolve the number from the protocol name.
  protocol          = "112"
  remote_ip_prefix  = element(var.machine_v6_cidrs, count.index)
  security_group_id = openstack_networking_secgroup_v2.worker.id
  description       = local.description
}

@@ -1,396 +0,0 @@
variable "openstack_master_root_volume_size" {
  type        = number
  default     = null
  description = "The size of the volume in gigabytes for the root block device of master nodes."
}

variable "openstack_base_image_name" {
  type        = string
  description = "Name of the base image to use for the nodes."
}

variable "openstack_bootstrap_shim_ignition" {
  type        = string
  default     = ""
  description = "Generated pointer/shim ignition config with user ca bundle."
}

variable "openstack_credentials_auth_url" {
  type    = string
  default = ""

  description = <<EOF
(required if cloud is not specified) The Identity authentication URL. If omitted, the OS_AUTH_URL environment variable is used.
EOF

}

variable "openstack_credentials_cert" {
  type    = string
  default = ""

  description = <<EOF
Specify a client certificate file for SSL client authentication. You can specify either a path to the file or the contents of the certificate. If omitted, the OS_CERT environment variable is used.
EOF

}

variable "openstack_credentials_cloud" {
  type    = string
  default = ""

  description = <<EOF
(required if auth_url is not specified) An entry in a clouds.yaml file. See the openstacksdk (https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html#config-files) documentation for more information about clouds.yaml files. If omitted, the OS_CLOUD environment variable is used.
EOF

}

variable "openstack_credentials_domain_id" {
  type    = string
  default = ""

  description = <<EOF
The ID of the Domain to scope to (Identity v3). If omitted, the OS_DOMAIN_ID environment variable is checked.
EOF

}

variable "openstack_credentials_domain_name" {
  type    = string
  default = ""

  description = <<EOF
The Name of the Domain to scope to (Identity v3). If omitted, the following environment variables are checked (in this order): OS_DOMAIN_NAME, OS_DEFAULT_DOMAIN.
EOF

}

variable "openstack_credentials_endpoint_type" {
  type    = string
  default = "public"

  description = <<EOF
Specify which type of endpoint to use from the service catalog. It can be set using the OS_ENDPOINT_TYPE environment variable. If not set, the public endpoint is used.
EOF

}

variable "openstack_credentials_insecure" {
  default = false

  description = <<EOF
Trust self-signed SSL certificates. If omitted, the OS_INSECURE environment variable is used.
EOF

}

variable "openstack_credentials_key" {
  type    = string
  default = ""

  description = <<EOF
Specify a client private key file for SSL client authentication. You can specify either a path to the file or the contents of the key. If omitted, the OS_KEY environment variable is used.
EOF

}

variable "openstack_credentials_password" {
  type    = string
  default = ""

  description = <<EOF
The Password to login with. If omitted, the OS_PASSWORD environment variable is used.
EOF

}

variable "openstack_credentials_project_domain_id" {
  type    = string
  default = ""

  description = <<EOF
The domain ID where the project is located. If omitted, the OS_PROJECT_DOMAIN_ID environment variable is checked.
EOF

}

variable "openstack_credentials_project_domain_name" {
  type    = string
  default = ""

  description = <<EOF
The domain name where the project is located. If omitted, the OS_PROJECT_DOMAIN_NAME environment variable is checked.
EOF

}

variable "openstack_credentials_region" {
  type    = string
  default = ""

  description = <<EOF
The region of the OpenStack cloud to use. If omitted, the OS_REGION_NAME environment variable is used. If OS_REGION_NAME is not set, then no region will be used. It should be possible to omit the region in single-region OpenStack environments, but this behavior may vary depending on the OpenStack environment being used.
EOF

}

variable "openstack_credentials_swauth" {
  default = false

  description = <<EOF
Set to true to authenticate against Swauth, a Swift-native authentication system. If omitted, the OS_SWAUTH environment variable is used. You must also set username to the Swauth/Swift username, such as username:project. Set the password to the Swauth/Swift key. Finally, set auth_url to the location of the Swift service. Note that this will only work when used with the OpenStack Object Storage resources.
EOF

}

variable "openstack_credentials_tenant_id" {
  type    = string
  default = ""

  description = <<EOF
The ID of the Tenant (Identity v2) or Project (Identity v3) to login with. If omitted, the OS_TENANT_ID or OS_PROJECT_ID environment variables are used.
EOF

}

variable "openstack_credentials_tenant_name" {
  type    = string
  default = ""

  description = <<EOF
The Name of the Tenant (Identity v2) or Project (Identity v3) to login with. If omitted, the OS_TENANT_NAME or OS_PROJECT_NAME environment variables are used.
EOF

}

variable "openstack_credentials_token" {
  type    = string
  default = ""

  description = <<EOF
(Required if not using user_name and password) A token is an expiring, temporary means of access issued via the Keystone service. By specifying a token, you do not have to specify a username/password combination, since the token was already created by a username/password out of band of Terraform. If omitted, the OS_TOKEN or OS_AUTH_TOKEN environment variables are used.
EOF

}

variable "openstack_credentials_use_octavia" {
  default = false

  description = <<EOF
If set to true, API requests will go to the Load Balancer service (Octavia) instead of the Networking service (Neutron).
EOF

}

variable "openstack_credentials_user_domain_id" {
  type    = string
  default = ""

  description = <<EOF
The domain ID where the user is located. If omitted, the OS_USER_DOMAIN_ID environment variable is checked.
EOF

}

variable "openstack_credentials_user_domain_name" {
  type    = string
  default = ""

  description = <<EOF
The domain name where the user is located. If omitted, the OS_USER_DOMAIN_NAME environment variable is checked.
EOF

}

variable "openstack_credentials_user_id" {
  type    = string
  default = ""

  description = <<EOF
The User ID to login with. If omitted, the OS_USER_ID environment variable is used.
EOF

}

variable "openstack_credentials_user_name" {
  type    = string
  default = ""

  description = <<EOF
The Username to login with. If omitted, the OS_USERNAME environment variable is used.
EOF

}
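
Taken together, the credential variables mirror the standard OS_* environment variables. A hypothetical terraform.tfvars sketch (all values invented for illustration) showing the two usual ways to satisfy them:

# Option 1: point at an entry in clouds.yaml.
openstack_credentials_cloud  = "openstack"
openstack_credentials_region = "regionOne"

# Option 2: pass explicit credentials instead of a cloud entry.
# openstack_credentials_auth_url    = "https://keystone.example.com:5000/v3"
# openstack_credentials_user_name   = "clouduser"
# openstack_credentials_password    = "secret"
# openstack_credentials_tenant_name = "openshift"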

variable "openstack_external_network" {
  type    = string
  default = ""

  description = <<EOF
(optional) Name of the external network. The network is used to provide
Floating IP access to the deployed nodes. Optional, but either the Name
or UUID option must be specified.
EOF

}

variable "openstack_external_network_id" {
  type    = string
  default = ""

  description = <<EOF
(optional) UUID of the external network. The network is used to provide
Floating IP access to the deployed nodes. Optional, but either the Name
or UUID option must be specified.
EOF

}

variable "openstack_master_extra_sg_ids" {
  type    = list(string)
  default = []

  description = <<EOF
(optional) List of additional security group IDs for master nodes.

Example: `["sg-51530134", "sg-b253d7cc"]`
EOF

}

variable "openstack_api_floating_ip" {
  type    = string
  default = ""

  description = <<EOF
(optional) Existing Floating IP to attach to the OpenShift API created by the installer.
EOF

}

variable "openstack_ingress_floating_ip" {
  type    = string
  default = ""

  description = <<EOF
(optional) Existing Floating IP to attach to the ingress port created by the installer.
EOF

}

variable "openstack_api_int_ips" {
  type        = list(string)
  description = "IPs on the node subnets reserved for the api-int VIP."
}

variable "openstack_ingress_ips" {
  type        = list(string)
  description = "IPs on the node subnets reserved for the ingress VIP."
}

variable "openstack_external_dns" {
  type        = list(string)
  description = "IP addresses of external DNS servers to add to networks."
  default     = []
}

variable "openstack_additional_network_ids" {
  type        = list(string)
  description = "IDs of additional networks for master nodes."
  default     = []
}

variable "openstack_additional_ports" {
  type = list(list(object({
    network_id = string
    fixed_ips = list(object({
      subnet_id  = string
      ip_address = string
    }))
  })))
  description = "Additional ports for each master node."
  default     = [[], [], []]
}

variable "openstack_master_flavor_name" {
  type        = string
  description = "Instance size for the master node(s). Example: `m1.medium`."
}

variable "openstack_octavia_support" {
  type    = bool
  default = false

  description = <<EOF
True if the OpenStack Octavia endpoint exists, false if it is missing.
EOF

}

variable "openstack_master_server_group_name" {
  type        = string
  description = "Name of the server group for the master nodes."
}

variable "openstack_master_server_group_policy" {
  type        = string
  description = "Policy of the server group for the master nodes."
}

variable "openstack_default_machines_port" {
  type = object({
    network_id = string
    fixed_ips = list(object({
      subnet_id  = string
      ip_address = string
    }))
  })
  default     = null
  description = "The masters' default control-plane port. If empty, the installer will create a new network."
}

variable "openstack_machines_ports" {
  type = list(object({
    network_id = string
    fixed_ips = list(object({
      subnet_id  = string
      ip_address = string
    }))
  }))
  description = "The control-plane port for each machine. If null, the default is used."
  default     = [null, null, null]
}
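
The relationship between the two port variables is easiest to see with a value sketch (all IDs invented): a null entry in openstack_machines_ports falls back to openstack_default_machines_port, so here masters 0 and 1 use the default port definition and master 2 gets its own:

openstack_default_machines_port = {
  network_id = "11111111-1111-1111-1111-111111111111"
  fixed_ips = [{
    subnet_id  = "22222222-2222-2222-2222-222222222222"
    ip_address = "10.0.0.5"
  }]
}

openstack_machines_ports = [null, null, {
  network_id = "33333333-3333-3333-3333-333333333333"
  fixed_ips = [{
    subnet_id  = "44444444-4444-4444-4444-444444444444"
    ip_address = "10.0.1.7"
  }]
}]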

variable "openstack_master_availability_zones" {
  type        = list(string)
  default     = [""]
  description = "List of availability zones to schedule the masters on."
}

variable "openstack_master_root_volume_availability_zones" {
  type        = list(string)
  default     = [""]
  description = "List of availability zones to schedule the masters' root volumes on."
}

variable "openstack_master_root_volume_types" {
  type        = list(string)
  default     = [""]
  description = "List of volume types used by the masters' root volumes."
}

variable "openstack_worker_server_group_names" {
  type        = set(string)
  default     = []
  description = "Names of the server groups for the worker nodes."
}

variable "openstack_worker_server_group_policy" {
  type        = string
  description = "Policy of the server groups for the worker nodes."
}

variable "openstack_user_managed_load_balancer" {
  type        = bool
  description = "True if the load balancer that is used for the control plane VIPs is managed by the user."
}

@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
  - ovirt-approvers
reviewers:
  - ovirt-reviewers

@@ -1,45 +0,0 @@
provider "ovirt" {
  url           = var.ovirt_url
  username      = var.ovirt_username
  password      = var.ovirt_password
  tls_ca_files  = var.ovirt_cafile == "" ? [] : [var.ovirt_cafile]
  tls_ca_bundle = var.ovirt_ca_bundle
  tls_insecure  = var.ovirt_insecure
}

resource "ovirt_vm" "bootstrap" {
  name        = "${var.cluster_id}-bootstrap"
  cluster_id  = var.ovirt_cluster_id
  template_id = var.release_image_template_id

  memory      = 8 * 1024 * 1024 * 1024
  cpu_cores   = 4
  cpu_threads = 1
  cpu_sockets = 1

  initialization_custom_script = var.ignition_bootstrap
}

resource "ovirt_tag" "cluster_bootstrap_tag" {
  name = "${var.cluster_id}-bootstrap"
}

resource "ovirt_vm_tag" "cluster_bootstrap_tag" {
  tag_id = ovirt_tag.cluster_bootstrap_tag.id
  vm_id  = ovirt_vm.bootstrap.id
}

resource "ovirt_vm_tag" "cluster_import_tag" {
  tag_id = ovirt_tag.cluster_bootstrap_tag.id
  vm_id  = var.tmp_import_vm_id
}

// ovirt_vm_start starts the bootstrap node.
resource "ovirt_vm_start" "bootstrap" {
  vm_id = ovirt_vm.bootstrap.id

  depends_on = [
    ovirt_vm_tag.cluster_bootstrap_tag,
    ovirt_vm_tag.cluster_import_tag,
  ]
}

@@ -1,3 +0,0 @@
output "bootstrap_vm_id" {
  value = ovirt_vm.bootstrap.id
}

@@ -1,7 +0,0 @@
variable "release_image_template_id" {
  type = string
}

variable "tmp_import_vm_id" {
  type = string
}

@@ -1,12 +0,0 @@
resource "ovirt_affinity_group" "affinity_groups" {
  count       = length(var.ovirt_affinity_groups)
  cluster_id  = var.ovirt_cluster_id
  name        = var.ovirt_affinity_groups[count.index]["name"]
  description = var.ovirt_affinity_groups[count.index]["description"]
  priority    = var.ovirt_affinity_groups[count.index]["priority"]
  enforcing   = var.ovirt_affinity_groups[count.index]["enforcing"]
  vms_rule {
    affinity  = "negative"
    enforcing = var.ovirt_affinity_groups[count.index]["enforcing"]
  }
}

@@ -1,3 +0,0 @@
output "ovirt_affinity_group_count" {
  value = length(ovirt_affinity_group.affinity_groups)
}

@@ -1,11 +0,0 @@
variable "ovirt_cluster_id" {
  type        = string
  description = "The ID of the Cluster"
}

variable "ovirt_affinity_groups" {
  type        = list(object({ name = string, priority = number, description = string, enforcing = string }))
  description = "Control plane affinity groups that will be created."
  default     = []
}
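
For reference, a hypothetical value for this variable (name and priority invented); note that enforcing is a string in the object type, not a bool:

ovirt_affinity_groups = [{
  name        = "mycluster-xxxxx-controlplane"
  priority    = 5
  description = "Created by the OpenShift installer"
  enforcing   = "true"
}]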

@@ -1,48 +0,0 @@
provider "ovirt" {
  url           = var.ovirt_url
  username      = var.ovirt_username
  password      = var.ovirt_password
  tls_ca_files  = var.ovirt_cafile == "" ? [] : [var.ovirt_cafile]
  tls_ca_bundle = var.ovirt_ca_bundle
  tls_insecure  = var.ovirt_insecure
}

module "template" {
  source                    = "./template"
  ovirt_cluster_id          = var.ovirt_cluster_id
  cluster_id                = var.cluster_id
  openstack_base_image_name = var.ovirt_base_image_name
  tmp_import_vm_id          = var.tmp_import_vm_id
}

module "affinity_group" {
  source                = "./affinity_group"
  ovirt_cluster_id      = var.ovirt_cluster_id
  ovirt_affinity_groups = var.ovirt_affinity_groups
}

module "masters" {
  source                           = "./masters"
  master_count                     = var.master_count
  ovirt_cluster_id                 = var.ovirt_cluster_id
  ovirt_template_id                = module.template.releaseimage_template_id
  ignition_master                  = var.ignition_master
  cluster_domain                   = var.cluster_domain
  cluster_id                       = var.cluster_id
  ovirt_master_instance_type_id    = var.ovirt_master_instance_type_id
  ovirt_master_cores               = var.ovirt_master_cores
  ovirt_master_sockets             = var.ovirt_master_sockets
  ovirt_master_threads             = var.ovirt_master_threads
  ovirt_master_memory              = var.ovirt_master_memory
  ovirt_master_vm_type             = var.ovirt_master_vm_type
  ovirt_master_os_disk_size_gb     = var.ovirt_master_os_disk_gb
  ovirt_master_affinity_groups     = var.ovirt_master_affinity_groups
  ovirt_affinity_group_count       = module.affinity_group.ovirt_affinity_group_count
  ovirt_master_auto_pinning_policy = var.ovirt_master_auto_pinning_policy
  ovirt_master_hugepages           = var.ovirt_master_hugepages
  ovirt_master_sparse              = var.ovirt_master_sparse
  ovirt_master_clone               = var.ovirt_master_clone
  ovirt_master_format              = var.ovirt_master_format
  ovirt_storage_domain_id          = var.ovirt_storage_domain_id
}

@@ -1,131 +0,0 @@
data "ovirt_template_disk_attachments" "master" {
  template_id = var.ovirt_template_id
}

data "ovirt_cluster_hosts" "master" {
  cluster_id = var.ovirt_cluster_id
}

data "ovirt_affinity_group" "master" {
  count      = var.ovirt_master_affinity_groups == null ? 0 : length(var.ovirt_master_affinity_groups)
  cluster_id = var.ovirt_cluster_id
  name       = var.ovirt_master_affinity_groups[count.index]
  depends_on = [var.ovirt_affinity_group_count]
}

locals {
  vm_affinity_groups = [
    for pair in setproduct(data.ovirt_affinity_group.master.*.id, ovirt_vm.master.*.id) : {
      affinity_group_id = pair[0]
      vm_id             = pair[1]
    }
  ]
}
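
setproduct() pairs every affinity group with every master VM, which is exactly what the attachment resource below needs: one entry per (group, VM) combination. An illustration with invented IDs:

# With data.ovirt_affinity_group.master.*.id = ["ag-1", "ag-2"]
# and  ovirt_vm.master.*.id                  = ["vm-a", "vm-b"],
# local.vm_affinity_groups evaluates to four pairs:
# [
#   { affinity_group_id = "ag-1", vm_id = "vm-a" },
#   { affinity_group_id = "ag-1", vm_id = "vm-b" },
#   { affinity_group_id = "ag-2", vm_id = "vm-a" },
#   { affinity_group_id = "ag-2", vm_id = "vm-b" },
# ]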

resource "ovirt_vm_affinity_group" "master" {
  count             = length(local.vm_affinity_groups)
  vm_id             = local.vm_affinity_groups[count.index].vm_id
  cluster_id        = var.ovirt_cluster_id
  affinity_group_id = local.vm_affinity_groups[count.index].affinity_group_id
}

// ovirt_vm creates the master nodes.
resource "ovirt_vm" "master" {
  count            = var.master_count
  name             = "${var.cluster_id}-master-${count.index}"
  cluster_id       = var.ovirt_cluster_id
  template_id      = var.ovirt_template_id
  instance_type_id = var.ovirt_master_instance_type_id != "" ? var.ovirt_master_instance_type_id : null
  vm_type          = var.ovirt_master_vm_type
  cpu_cores        = var.ovirt_master_cores
  cpu_sockets      = var.ovirt_master_sockets
  cpu_threads      = var.ovirt_master_threads

  // If an instance type is declared, memory is redundant. Since Terraform does
  // not allow conditionally omitting an attribute, it must be passed anyway.
  // The number passed is multiplied by 4 and becomes the maximum memory the VM can have.
  memory = var.ovirt_master_instance_type_id != "" || var.ovirt_master_memory == "" ? 16348 * 1024 * 1024 : tonumber(var.ovirt_master_memory) * 1024 * 1024

  huge_pages        = var.ovirt_master_hugepages > 0 ? var.ovirt_master_hugepages : null
  serial_console    = var.ovirt_master_vm_type == "high_performance" ? true : null
  soundcard_enabled = var.ovirt_master_vm_type == "high_performance" ? false : null
  memory_ballooning = var.ovirt_master_vm_type == "high_performance" ? false : null
  cpu_mode          = var.ovirt_master_vm_type == "high_performance" ? "host_passthrough" : null

  # Here we check if ovirt_master_clone is set and use it as a bool if so; otherwise we default based on the VM type.
  #
  # Clone set explicitly                -> clone = var.ovirt_master_clone
  # VM type desktop                     -> clone = false
  # VM type server or high performance  -> clone = true
  clone = var.ovirt_master_clone != null ? tobool(var.ovirt_master_clone) : (var.ovirt_master_vm_type == "desktop" ? false : true)

  # Initialization sets the host name and the script run when the machine first starts.
  initialization_hostname      = "${var.cluster_id}-master-${count.index}"
  initialization_custom_script = var.ignition_master

  # The placement policy dictates which hosts this master can run on.
  #
  # TODO there may be a bug here since we are pinning the masters to the existing detected hosts and this is never
  # updated.
  placement_policy_affinity = var.ovirt_master_auto_pinning_policy != "" && var.ovirt_master_auto_pinning_policy != "none" ? "migratable" : null
  placement_policy_host_ids = var.ovirt_master_auto_pinning_policy != "" && var.ovirt_master_auto_pinning_policy != "none" ? data.ovirt_cluster_hosts.master.hosts.*.id : null

  # This section overrides the format and sparse option for the disks from the template.
  dynamic "template_disk_attachment_override" {
    for_each = data.ovirt_template_disk_attachments.master.disk_attachments
    content {
      disk_id      = template_disk_attachment_override.value.disk_id
      format       = var.ovirt_master_format != "" ? var.ovirt_master_format : null
      provisioning = var.ovirt_master_sparse == null ? null : (tobool(var.ovirt_master_sparse) ? "sparse" : "non-sparse")
    }
  }
  depends_on = [var.ovirt_affinity_group_count]
}

data "ovirt_disk_attachments" "master" {
  count = var.master_count
  vm_id = ovirt_vm.master.*.id[count.index]
}

// ovirt_vm_disks_resize resizes the master disks to the specified size.
resource "ovirt_vm_disks_resize" "master" {
  count = var.master_count
  vm_id = ovirt_vm.master.*.id[count.index]
  size  = var.ovirt_master_os_disk_size_gb * 1024 * 1024 * 1024
}

// ovirt_vm_graphics_consoles removes the graphics consoles from non-desktop machines.
resource "ovirt_vm_graphics_consoles" "master" {
  count = var.ovirt_master_vm_type == "high_performance" ? var.master_count : 0
  vm_id = ovirt_vm.master.*.id[count.index]
}

// ovirt_vm_optimize_cpu_settings auto-optimizes CPU and NUMA alignment on server and high-performance types.
resource "ovirt_vm_optimize_cpu_settings" "master" {
  count = var.ovirt_master_auto_pinning_policy != "" && var.ovirt_master_auto_pinning_policy != "none" ? var.master_count : 0
  vm_id = ovirt_vm.master.*.id[count.index]
}

// ovirt_vm_start starts the master nodes.
resource "ovirt_vm_start" "master" {
  count = var.master_count
  vm_id = ovirt_vm.master.*.id[count.index]

  depends_on = [
    ovirt_vm_graphics_consoles.master,
    ovirt_vm_optimize_cpu_settings.master,
    ovirt_vm_disks_resize.master,
    ovirt_vm_tag.master,
    ovirt_vm_affinity_group.master,
  ]
}

resource "ovirt_tag" "cluster_tag" {
  name = var.cluster_id
}

resource "ovirt_vm_tag" "master" {
  count  = length(ovirt_vm.master)
  tag_id = ovirt_tag.cluster_tag.id
  vm_id  = ovirt_vm.master.*.id[count.index]
}
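
A worked example of the memory expression above, assuming ovirt_master_memory is given in MiB as its variable description says:

# ovirt_master_memory = "16384"  ->  memory = 16384 * 1024 * 1024
#                                           = 17179869184 bytes (16 GiB)
# If an instance type is set, or the variable is empty, the fixed fallback
# of 16348 * 1024 * 1024 bytes is passed instead.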

@@ -1,3 +0,0 @@
output "control_plane_vm_ids" {
  value = ovirt_vm_start.master.*.vm_id
}

@@ -1,119 +0,0 @@
variable "cluster_id" {
  description = "The ID of the OpenShift cluster"
}

variable "cluster_domain" {
  description = "The domain name of the OpenShift cluster"
}

variable "master_count" {
  type        = string
  description = "Number of masters"
  default     = 3
}

variable "ovirt_cluster_id" {
  type        = string
  description = "The ID of the Cluster"
}

variable "ovirt_template_id" {
  type        = string
  description = "The ID of the VM template"
}

variable "ignition_master" {
  type        = string
  description = "master ignition config"
}

variable "ovirt_master_memory" {
  type        = string
  description = "master VM memory in MiB"
  default     = 16348 * 1024 * 1024
}

variable "ovirt_master_cores" {
  type        = string
  description = "master VM number of cores"
  default     = 1
}

variable "ovirt_master_sockets" {
  type        = string
  description = "master VM number of sockets"
  default     = 1
}

variable "ovirt_master_threads" {
  type        = string
  description = "master VM number of threads"
  default     = 1
}

variable "ovirt_master_os_disk_size_gb" {
  type        = string
  description = "master VM disk size in GiB"
}

variable "ovirt_master_vm_type" {
  type        = string
  description = "master VM type"
}

variable "ovirt_master_instance_type_id" {
  type        = string
  description = "master VM instance type ID"
}

variable "ovirt_master_affinity_groups" {
  type        = list(string)
  description = "master VM affinity group names"
  default     = []
}

//TODO: REMOVE once we port to TF 0.13 and can use depends_on on modules
variable "ovirt_affinity_group_count" {
  type        = string
  description = "creates a dependency between the affinity_group module and the masters module"
}

variable "ovirt_master_auto_pinning_policy" {
  type        = string
  description = "master VM auto pinning policy"
}

variable "ovirt_master_hugepages" {
  type        = string
  description = "master VM hugepages size in KiB"
}

variable "ovirt_master_sparse" {
  type        = bool
  description = "make master VM disks sparse."
  default     = null
}

variable "ovirt_master_clone" {
  type        = bool
  description = "clone master VM disk from template instead of linking. Defaults to false for the desktop ovirt_master_vm_type, true otherwise."
  default     = null
}

variable "ovirt_master_format" {
  type        = string
  description = "master VM disk format; can be empty, 'raw', or 'cow'"
  validation {
    condition     = var.ovirt_master_format == "" || var.ovirt_master_format == "cow" || var.ovirt_master_format == "raw"
    error_message = "The ovirt_master_format must be empty, 'raw', or 'cow'."
  }
}

variable "ovirt_storage_domain_id" {
  type        = string
  description = "The ID of the Storage Domain for the template"
  validation {
    condition     = var.ovirt_storage_domain_id != ""
    error_message = "The ovirt_storage_domain_id must not be empty."
  }
}

@@ -1,7 +0,0 @@
output "control_plane_vm_ids" {
  value = module.masters.control_plane_vm_ids
}

output "release_image_template_id" {
  value = module.template.releaseimage_template_id
}

@@ -1,30 +0,0 @@
// This module is responsible for creating the unique template
// for the OpenShift cluster. It has these properties:
// 1. The name of the template is always derived from the name
//    of the OpenShift cluster (var.cluster_id), e.g. 'clustername-4t9hs2',
//    which is the CLUSTER.INFRA_ID.
// 2. The disk.alias (the disk name) is set to the releaseImage name
//    as set by the installer, which in terraform is var.openstack_base_image_name.

locals {
  image_name = "${var.cluster_id}-rhcos"
}

// Template created from the uploaded image.
resource "ovirt_template" "releaseimage_template" {
  count = var.tmp_import_vm_id != "" ? 1 : 0

  // Name the template after the OpenShift cluster ID.
  name        = local.image_name
  description = "Template in use by OpenShift. Do not delete!"
  // Create it from the temporary import VM.
  vm_id = var.tmp_import_vm_id
}

// Existing template provided by the user.
data "ovirt_templates" "finalTemplate" {
  count = var.tmp_import_vm_id == "" ? 1 : 0

  fail_on_empty = true
  name          = var.openstack_base_image_name
}

@@ -1,3 +0,0 @@
output "releaseimage_template_id" {
  value = var.tmp_import_vm_id == "" ? one(data.ovirt_templates.finalTemplate.0.templates.*.id) : ovirt_template.releaseimage_template.0.id
}
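
How the conditional in this output resolves, for reference:

# tmp_import_vm_id == ""  ->  the user supplied an existing template name;
#                             one() extracts the single matching template ID
#                             from the data source (fail_on_empty guarantees
#                             at least one match; one() errors on several).
# tmp_import_vm_id != ""  ->  the ID of the template just created from the
#                             imported VM is returned instead.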

@@ -1,18 +0,0 @@
variable "cluster_id" {
  description = "The ID of the OpenShift cluster"
}

variable "ovirt_cluster_id" {
  type        = string
  description = "The ID of the Cluster"
}

variable "openstack_base_image_name" {
  type        = string
  description = "Name of the existing base image to use for the nodes."
}

variable "tmp_import_vm_id" {
  type        = string
  description = "ID of the temporary VM the template is created from"
}

@@ -1,4 +0,0 @@
variable "tmp_import_vm_id" {
  type        = string
  description = "ID of the temporary VM the template is created from"
}

@@ -1,54 +0,0 @@
locals {
  image_name = "${var.cluster_id}-rhcos"
}

provider "ovirt" {
  url           = var.ovirt_url
  username      = var.ovirt_username
  password      = var.ovirt_password
  tls_ca_files  = var.ovirt_cafile == "" ? [] : [var.ovirt_cafile]
  tls_ca_bundle = var.ovirt_ca_bundle
  tls_insecure  = var.ovirt_insecure
}

// We are creating a new disk from an image here. The process is a single step because a corrupted upload can cause
// the disk to be deleted, and it may then need to be recreated.
resource "ovirt_disk_from_image" "releaseimage" {
  count = length(var.ovirt_base_image_name) == 0 ? 1 : 0

  // source_file provides the source file name to read from.
  source_file = var.ovirt_base_image_local_file_path

  alias             = local.image_name
  storage_domain_id = var.ovirt_storage_domain_id
  sparse            = true
  format            = "cow"
}

data "ovirt_blank_template" "blank" {}

resource "ovirt_vm" "tmp_import_vm" {
  // Create the VM for import only when we don't have an existing template.
  count = length(var.ovirt_base_image_name) == 0 ? 1 : 0

  name        = "tmpvm-for-${ovirt_disk_from_image.releaseimage.0.alias}"
  cluster_id  = var.ovirt_cluster_id
  template_id = data.ovirt_blank_template.blank.id
  os_type     = "rhcos_x64"
}

resource "ovirt_disk_attachment" "tmp_import_vm" {
  count          = length(var.ovirt_base_image_name) == 0 ? 1 : 0
  vm_id          = ovirt_vm.tmp_import_vm.0.id
  disk_id        = ovirt_disk_from_image.releaseimage.0.id
  disk_interface = "virtio_scsi"
  bootable       = true
  active         = true
}

resource "ovirt_nic" "tmp_import_vm" {
  count           = length(var.ovirt_base_image_name) == 0 ? 1 : 0
  vm_id           = ovirt_vm.tmp_import_vm.0.id
  vnic_profile_id = var.ovirt_vnic_profile_id
  name            = "tmpnic-for-${ovirt_disk_from_image.releaseimage.0.alias}"
}

@@ -1,4 +0,0 @@
output "tmp_import_vm_id" {
  value      = length(var.ovirt_base_image_name) == 0 ? ovirt_vm.tmp_import_vm.0.id : ""
  depends_on = [ovirt_nic.tmp_import_vm, ovirt_disk_attachment.tmp_import_vm]
}

@@ -1,161 +0,0 @@
variable "bootstrap_dns" {
  type        = string
  default     = true
  description = "Whether to include DNS entries for the bootstrap node or not."
}

variable "ovirt_url" {
  type        = string
  description = "The Engine URL"
}

variable "ovirt_username" {
  type        = string
  description = "The name of the user to access the Engine API"
}

variable "ovirt_password" {
  type        = string
  description = "The plaintext password of the user to access the Engine API"
}

variable "ovirt_cafile" {
  type        = string
  description = "Path to a file containing the CA certificate for the oVirt engine API in PEM format"
  default     = ""
}

variable "ovirt_ca_bundle" {
  type        = string
  description = "The CA certificate for the oVirt engine API in PEM format"
}

variable "ovirt_insecure" {
  type        = bool
  description = "Disable oVirt engine certificate verification"
}

variable "ovirt_cluster_id" {
  type        = string
  description = "The ID of the Cluster"
  validation {
    condition     = var.ovirt_cluster_id != ""
    error_message = "The ovirt_cluster_id must not be empty."
  }
}

variable "ovirt_storage_domain_id" {
  type        = string
  description = "The ID of the Storage Domain for the template"
  validation {
    condition     = var.ovirt_storage_domain_id != ""
    error_message = "The ovirt_storage_domain_id must not be empty."
  }
}

variable "ovirt_base_image_name" {
  type        = string
  default     = ""
  description = "Name of an existing base image to use for the nodes."
}

variable "ovirt_base_image_local_file_path" {
  type        = string
  default     = ""
  description = "Local file path of the base image file to use for the nodes."
}

variable "ovirt_network_name" {
  type        = string
  default     = "ovirtmgmt"
  description = "The name of the Logical Network for the selected Engine cluster."
}

variable "ovirt_vnic_profile_id" {
  type        = string
  description = "The ID of the vNIC profile of the Logical Network."
}

variable "ovirt_affinity_groups" {
  type        = list(object({ name = string, priority = number, description = string, enforcing = string }))
  description = "Affinity groups that will be created"
  default     = []
}

variable "ovirt_master_memory" {
  type        = string
  description = "master VM memory in MiB"
  default     = 16348 * 1024 * 1024
}

variable "ovirt_master_cores" {
  type        = string
  description = "master VM number of cores"
  default     = 1
}

variable "ovirt_master_sockets" {
  type        = string
  description = "master VM number of sockets"
  default     = 1
}

variable "ovirt_master_threads" {
  type        = string
  description = "master VM number of threads"
  default     = 1
}

variable "ovirt_master_os_disk_gb" {
  type        = string
  description = "master VM disk size in GiB"
}

variable "ovirt_master_vm_type" {
  type        = string
  description = "master VM type"
}

variable "ovirt_master_instance_type_id" {
  type        = string
  description = "master VM instance type ID"
}

variable "ovirt_master_affinity_groups" {
  type        = list(string)
  description = "master VM affinity group names"
}

variable "ovirt_master_auto_pinning_policy" {
  type        = string
  default     = "none"
  description = "master VM auto pinning policy"
}

variable "ovirt_master_hugepages" {
  type        = string
  description = "master VM hugepages size in KiB"
}

variable "ovirt_master_sparse" {
  type        = bool
  description = "make master VM disks sparse."
  default     = null
}

variable "ovirt_master_clone" {
  type        = bool
  description = "clone master VM disk from template instead of linking. Defaults to false for the desktop ovirt_master_vm_type, true otherwise."
  default     = null
}

variable "ovirt_master_format" {
  type        = string
  description = "master VM disk format; can be empty, 'raw', or 'cow'"
  validation {
    condition     = var.ovirt_master_format == "" || var.ovirt_master_format == "cow" || var.ovirt_master_format == "raw"
    error_message = "The ovirt_master_format must be empty, 'raw', or 'cow'."
  }
}

@@ -1,10 +0,0 @@
provider_installation {
  filesystem_mirror {
    path    = "plugins"
    include = ["*/*/*"]
  }
  direct {
    exclude = ["*/*/*"]
  }
}
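
This CLI configuration forces Terraform to resolve every provider from the bundled "plugins" directory and never contact a registry: the filesystem mirror includes every hostname/namespace/type pattern, and the direct block excludes the same set. A hypothetical mirror layout (provider name and version invented for illustration):

# plugins/
#   registry.terraform.io/
#     example/
#       ovirt/
#         1.0.0/
#           linux_amd64/
#             terraform-provider-ovirt_v1.0.0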