Remove multiple clusters from config (#3135)
committed by GitHub
parent 4bc3c28d31
commit fef5a0f6f9
@@ -1,362 +1,363 @@
-clusters:
-- aws:
+aws:
  # (optional) Unique name under which the Amazon S3 bucket will be created. Bucket name must start with a lower case name and is limited to 63 characters.
  # The Tectonic Installer uses the bucket to store tectonic assets and kubeconfig.
  # If name is not provided the installer will construct the name using "name", current AWS region and "baseDomain"
  # assetsS3BucketName:

  # (optional) Extra AWS tags to be applied to created autoscaling group resources.
  # This is a list of maps having the keys `key`, `value` and `propagate_at_launch`.
  #
  # Example: `[ { key = "foo", value = "bar", propagate_at_launch = true } ]`
  # autoScalingGroupExtraTags:

  # (optional) AMI override for all nodes. Example: `ami-foobar123`.
  # ec2AMIOverride:

  etcd:
    # Instance size for the etcd node(s). Example: `t2.medium`. Read the [etcd recommended hardware](https://coreos.com/etcd/docs/latest/op-guide/hardware.html) guide for best performance
    ec2Type: t2.medium

    # (optional) List of additional security group IDs for etcd nodes.
    #
    # Example: `["sg-51530134", "sg-b253d7cc"]`
    # extraSGIDs:

    # (optional) Name of IAM role to use for the instance profiles of etcd nodes.
    # The name is also the last part of a role's ARN.
    #
    # Example:
    # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer
    # * Role Name = tectonic-installer
    # iamRoleName:

    rootVolume:
      # The amount of provisioned IOPS for the root block device of etcd nodes.
      # Ignored if the volume type is not io1.
      iops: 100

      # The size of the volume in gigabytes for the root block device of etcd nodes.
      size: 30

      # The type of volume for the root block device of etcd nodes.
      type: gp2

  external:
    # (optional) List of subnet IDs within an existing VPC to deploy master nodes into.
    # Required to use an existing VPC and the list must match the AZ count.
    #
    # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]`
    # masterSubnetIDs:

    # (optional) If set, the given Route53 zone ID will be used as the internal (private) zone.
    # This zone will be used to create etcd DNS records as well as internal API and internal Ingress records.
    # If set, no additional private zone will be created.
    #
    # Example: `"Z1ILINNUJGTAO1"`
    # privateZone:

    # (optional) ID of an existing VPC to launch nodes into.
    # If unset a new VPC is created.
    #
    # Example: `vpc-123456`
    # vpcID:

    # (optional) List of subnet IDs within an existing VPC to deploy worker nodes into.
    # Required to use an existing VPC and the list must match the AZ count.
    #
    # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]`
    # workerSubnetIDs:

  # (optional) Extra AWS tags to be applied to created resources.
  #
  # Example: `{ "key" = "value", "foo" = "bar" }`
  # extraTags:

  # (optional) Name of IAM role to use to access AWS in order to deploy the Tectonic Cluster.
  # The name is also the full role's ARN.
  #
  # Example:
  # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer
  # installerRole:

  master:
    # (optional) This configures master availability zones and their corresponding subnet CIDRs directly.
    #
    # Example:
    # `{ eu-west-1a = "10.0.0.0/20", eu-west-1b = "10.0.16.0/20" }`
    # customSubnets:

    # Instance size for the master node(s). Example: `t2.medium`.
    ec2Type: t2.medium

    # (optional) List of additional security group IDs for master nodes.
    #
    # Example: `["sg-51530134", "sg-b253d7cc"]`
    # extraSGIDs:

    # (optional) Name of IAM role to use for the instance profiles of master nodes.
    # The name is also the last part of a role's ARN.
    #
    # Example:
    # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer
    # * Role Name = tectonic-installer
    # iamRoleName:

    rootVolume:
      # The amount of provisioned IOPS for the root block device of master nodes.
      # Ignored if the volume type is not io1.
      iops: 100

      # The size of the volume in gigabytes for the root block device of master nodes.
      size: 30

      # The type of volume for the root block device of master nodes.
      type: gp2

  # (optional) If set to true, create private-facing ingress resources (ELB, A-records).
  # If set to false, no private-facing ingress resources will be provisioned and all DNS records will be created in the public Route53 zone.
  # privateEndpoints: true

  # (optional) This declares the AWS credentials profile to use.
  # profile: default

  # (optional) If set to true, create public-facing ingress resources (ELB, A-records).
  # If set to false, no public-facing ingress resources will be created.
  # publicEndpoints: true

  # The target AWS region for the cluster.
  region: eu-west-1

  # Name of an SSH key located within the AWS region. Example: coreos-user.
  sshKey:

  # Block of IP addresses used by the VPC.
  # This should not overlap with any other networks, such as a private datacenter connected via Direct Connect.
  vpcCIDRBlock: 10.0.0.0/16

  worker:
    # (optional) This configures worker availability zones and their corresponding subnet CIDRs directly.
    #
    # Example: `{ eu-west-1a = "10.0.64.0/20", eu-west-1b = "10.0.80.0/20" }`
    # customSubnets:

    # Instance size for the worker node(s). Example: `t2.medium`.
    ec2Type: t2.medium

    # (optional) List of additional security group IDs for worker nodes.
    #
    # Example: `["sg-51530134", "sg-b253d7cc"]`
    # extraSGIDs:

    # (optional) Name of IAM role to use for the instance profiles of worker nodes.
    # The name is also the last part of a role's ARN.
    #
    # Example:
    # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer
    # * Role Name = tectonic-installer
    # iamRoleName:

    # (optional) List of ELBs to attach all worker instances to.
    # This is useful for exposing NodePort services via load-balancers managed separately from the cluster.
    #
    # Example:
    # * `["ingress-nginx"]`
    # loadBalancers:

    rootVolume:
      # The amount of provisioned IOPS for the root block device of worker nodes.
      # Ignored if the volume type is not io1.
      iops: 100

      # The size of the volume in gigabytes for the root block device of worker nodes.
      size: 30

      # The type of volume for the root block device of worker nodes.
      type: gp2

# The base DNS domain of the cluster. It must NOT contain a trailing period. Some
# DNS providers will automatically add this if necessary.
#
# Example: `openstack.dev.coreos.systems`.
#
# Note: This field MUST be set manually prior to creating the cluster.
#
# [Azure-specific NOTE]
# To use Azure-provided DNS, `BaseDomain` should be set to `""`
# If using DNS records, ensure that `BaseDomain` is set to a properly configured external DNS zone.
# Instructions for configuring delegated domains for Azure DNS can be found here: https://docs.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns
baseDomain:

ca:
  # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate.
  # If left blank, a CA certificate will be automatically generated.
  # cert:

  # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate.
  # This field is mandatory if `ca_cert` is set.
  # key:

  # (optional) The algorithm used to generate ca_key.
  # The default value is currently recommended.
  # This field is mandatory if `ca_cert` is set.
  # keyAlg: RSA

containerLinux:
  # (optional) The Container Linux update channel.
  #
  # Examples: `stable`, `beta`, `alpha`
  # channel: stable

  # The Container Linux version to use. Set to `latest` to select the latest available version for the selected update channel.
  #
  # Examples: `latest`, `1465.6.0`
  version: latest

# (optional) A list of PEM encoded CA files that will be installed in /etc/ssl/certs on etcd, master, and worker nodes.
# customCAPEMList:

ddns:
  key:
    # (optional) This only applies if you use the modules/dns/ddns module.
    #
    # Specifies the RFC2136 Dynamic DNS server key algorithm.
    # algorithm:

    # (optional) This only applies if you use the modules/dns/ddns module.
    #
    # Specifies the RFC2136 Dynamic DNS server key name.
    # name:

    # (optional) This only applies if you use the modules/dns/ddns module.
    #
    # Specifies the RFC2136 Dynamic DNS server key secret.
    # secret:

  # (optional) This only applies if you use the modules/dns/ddns module.
  #
  # Specifies the RFC2136 Dynamic DNS server IP/host to register IP addresses to.
  # server:

# (optional) DNS prefix used to construct the console and API server endpoints.
# dnsName:

etcd:
  # The number of etcd nodes to be created.
  # If set to zero, the count of etcd nodes will be determined automatically.
  #
  # Note: This is not supported on bare metal.
  count: 0

  external:
    # (optional) The path of the file containing the CA certificate for TLS communication with etcd.
    #
    # Note: This works only when used in conjunction with an external etcd cluster.
    # If set, the variable `servers` must also be set.
    # caCertPath: /dev/null

    # (optional) The path of the file containing the client certificate for TLS communication with etcd.
    #
    # Note: This works only when used in conjunction with an external etcd cluster.
    # If set, the variables `servers`, `caCertPath`, and `clientKeyPath` must also be set.
    # clientCertPath: /dev/null

    # (optional) The path of the file containing the client key for TLS communication with etcd.
    #
    # Note: This works only when used in conjunction with an external etcd cluster.
    # If set, the variables `servers`, `caCertPath`, and `clientCertPath` must also be set.
    # clientKeyPath: /dev/null

    # (optional) List of external etcd v3 servers to connect with (hostnames/IPs only).
    # Needs to be set if using an external etcd cluster.
    # Note: If this variable is defined, the installer will not create self-signed certs.
    # To provide a CA certificate to trust the etcd servers, set "caCertPath".
    #
    # Example: `["etcd1", "etcd2", "etcd3"]`
    # servers:

iscsi:
  # (optional) Start iscsid.service to enable iscsi volume attachment.
  # enabled: false

# The path to the Tectonic license file.
# You can download the Tectonic license file from your Account overview page at [1].
#
# [1] https://account.coreos.com/overview
licensePath:

master:
  # The number of master nodes to be created.
  # This applies only to cloud platforms.
  count: 1

# The name of the cluster.
# If used in a cloud-environment, this will be prepended to `baseDomain` resulting in the URL to the Tectonic console.
#
# Note: This field MUST be set manually prior to creating the cluster.
# Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints.
name:

networking:
  # (optional) This declares the MTU used by Calico.
  # mtu:

  # (optional) This declares the IP range to assign Kubernetes pod IPs in CIDR notation.
  # podCIDR: 10.2.0.0/16

  # (optional) This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation.
  # The maximum size of this IP range is /12
  # serviceCIDR: 10.3.0.0/16

  # (optional) Configures the network to be used in Tectonic. One of the following values can be used:
  #
  # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN.
  #
  # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico.
  #
  # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only.
  #
  # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services.
  # type: canal

nodePools:
  # The number of etcd nodes to be created.
  # If set to zero, the count of etcd nodes will be determined automatically.
  #
  # Note: This is not supported on bare metal.
  - count: 0
    name: etcd

  # The number of master nodes to be created.
  # This applies only to cloud platforms.
  - count: 1
    name: master

  # The number of worker nodes to be created.
  # This applies only to cloud platforms.
  - count: 3
    name: worker

# The platform used for deploying.
platform: AWS

proxy:
  # (optional) HTTP proxy address.
  #
  # Example: `http://myproxy.example.com`
  # http:

  # (optional) HTTPS proxy address.
  #
  # Example: `http://myproxy.example.com`
  # https:

  # (optional) List of local endpoints that will not use HTTP proxy.
  #
  # Example: `["127.0.0.1","localhost",".example.com","10.3.0.1"]`
  # no:

# The path to the pull secret file in JSON format.
# This is known to be a "Docker pull secret" as produced by the docker login [1] command.
# A sample JSON content is shown in [2].
# You can download the pull secret from your Account overview page at [3].
#
# [1] https://docs.docker.com/engine/reference/commandline/login/
#
# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup
#
# [3] https://account.coreos.com/overview
pullSecretPath:

# Validity period of the self-signed certificates (in hours).
# Default is 3 years.
# This setting is ignored if user provided certificates are used.
tlsValidityPeriod: 26280

worker:
  # The number of worker nodes to be created.
  # This applies only to cloud platforms.
  count: 3
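Read alongside the Go changes further down this diff, here is a minimal Go sketch of how this now single-cluster file is loaded after the commit; it is only a sketch, the example path and the printed field are illustrative, and the printable element type of Validate() is an assumption:

package main

import (
    "fmt"
    "log"

    "github.com/coreos/tectonic-installer/installer/pkg/config"
)

func main() {
    // ParseConfigFile now returns a single *config.Cluster instead of a
    // *config.Config wrapping a Clusters slice (see the parser.go hunk below).
    cluster, err := config.ParseConfigFile("examples/tectonic.aws.yaml") // illustrative path
    if err != nil {
        log.Fatalf("failed to parse config: %v", err)
    }

    // Validation is likewise called on the cluster itself now.
    if errs := cluster.Validate(); len(errs) != 0 {
        log.Fatalf("invalid config: %v", errs)
    }

    fmt.Println("cluster name:", cluster.Name)
}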
@@ -1,56 +1,55 @@
-clusters:
-- name: test
+name: test
baseDomain: cluster.com
platform: AWS
networking:
  type: canal
  mtu: 1480
  podCIDR: 10.2.0.0/16
  serviceCIDR: 10.3.0.0/16
masters:
  nodePools:
  - master
workers:
  nodePools:
  - worker
etcd:
  nodePools:
  - etcd
tlsValidityPeriod: 26280
pullSecretPath: /path/config.json
licensePath: /path/tectonic-license.txt
containerLinux:
  channel: stable
  version: latest
admin:
  email: test@coreos.com
  password: asd123
aws:
  region: eu-west-1
  sshKey: tectonic
  vpcCIDRBlock: 10.0.0.0/16
  etcd:
    ec2Type: t2.medium
    rootVolume:
      iops: 100
      size: 30
      type: gp2
  master:
    ec2Type: t2.medium
    rootVolume:
      iops: 100
      size: 30
      type: gp2
  worker:
    ec2Type: t2.medium
    rootVolume:
      iops: 100
      size: 30
      type: gp2
nodePools:
- name: master
  count: 2
- name: worker
  count: 3
- name: etcd
  count: 3
@@ -1,10 +1,9 @@
-clusters:
-- name: test
+name: test
platform: AWS
baseDomain: cluster.com
etcd:
  nodePools:
  - etcd
nodePools:
- name: etcd
  count: 3
@@ -8,14 +8,13 @@ import (
 )

 func initConfig(t *testing.T, file string) ConfigGenerator {
-    testConfig, err := config.ParseConfigFile("./fixtures/" + file)
+    cluster, err := config.ParseConfigFile("./fixtures/" + file)
     if err != nil {
         t.Errorf("Test case TestUrlFunctions: failed to parse test config, %s", err)
     }
-    cluster := testConfig.Clusters[0]

     return ConfigGenerator{
-        cluster,
+        *cluster,
     }
 }
 func TestUrlFunctions(t *testing.T) {
@@ -10,7 +10,6 @@ go_library(
     name = "go_default_library",
     srcs = [
         "cluster.go",
-        "config.go",
         "parser.go",
         "types.go",
         "validate.go",
@@ -3,6 +3,8 @@ package config
 import (
     "encoding/json"

+    "gopkg.in/yaml.v2"
+
     "github.com/coreos/tectonic-installer/installer/pkg/config/aws"
     "github.com/coreos/tectonic-installer/installer/pkg/config/azure"
     "github.com/coreos/tectonic-installer/installer/pkg/config/gcp"
@@ -23,6 +25,7 @@ type Cluster struct {
     DNSName string `json:"tectonic_dns_name,omitempty" yaml:"dnsName,omitempty"`
     Etcd `json:",inline" yaml:"etcd,omitempty"`
     ISCSI `json:",inline" yaml:"iscsi,omitempty"`
+    Internal `json:",inline" yaml:"-"`
     LicensePath string `json:"tectonic_license_path,omitempty" yaml:"licensePath,omitempty"`
     Master `json:",inline" yaml:"master,omitempty"`
     Name string `json:"tectonic_cluster_name,omitempty" yaml:"name,omitempty"`
@@ -40,7 +43,6 @@ type Cluster struct {
     metal.Metal `json:",inline" yaml:"metal,omitempty"`
     openstack.OpenStack `json:",inline" yaml:"openstack,omitempty"`
     vmware.VMware `json:",inline" yaml:"vmware,omitempty"`
-    Internal `json:",inline" yaml:"-"`
 }

 // NodeCount will return the number of nodes specified in NodePools with matching names.
@@ -71,3 +73,31 @@ func (c *Cluster) TFVars() (string, error) {
     return string(data), nil
 }
+
+// YAML will return the config for the cluster in yaml format.
+func (c *Cluster) YAML() (string, error) {
+    c.NodePools = append(c.NodePools, NodePool{
+        Count: c.Etcd.Count,
+        Name:  "etcd",
+    })
+    c.Etcd.NodePools = []string{"etcd"}
+
+    c.NodePools = append(c.NodePools, NodePool{
+        Count: c.Master.Count,
+        Name:  "master",
+    })
+    c.Master.NodePools = []string{"master"}
+
+    c.NodePools = append(c.NodePools, NodePool{
+        Count: c.Worker.Count,
+        Name:  "worker",
+    })
+    c.Worker.NodePools = []string{"worker"}
+
+    yaml, err := yaml.Marshal(c)
+    if err != nil {
+        return "", err
+    }
+
+    return string(yaml), nil
+}
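A hedged usage sketch of the new method, using only identifiers visible in this hunk and in the struct changes above; the cluster values themselves are made up:

package main

import (
    "fmt"
    "log"

    "github.com/coreos/tectonic-installer/installer/pkg/config"
)

func main() {
    // Illustrative cluster; only Name and the per-role counts are set here.
    c := &config.Cluster{Name: "demo"}
    c.Etcd.Count = 3
    c.Master.Count = 1
    c.Worker.Count = 3

    // YAML() appends one NodePool per role from the per-role counts, points
    // each role at its pool by name, and then marshals the whole cluster.
    out, err := c.YAML()
    if err != nil {
        log.Fatalf("failed to render YAML: %v", err)
    }
    fmt.Println(out)
}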
@@ -1,38 +0,0 @@
-package config
-
-import "gopkg.in/yaml.v2"
-
-// Config defines the top level config for a configuration file.
-type Config struct {
-    Clusters []Cluster `json:",inline" yaml:"clusters,omitempty"`
-}
-
-// YAML will return the config for the cluster in yaml format.
-func (c *Config) YAML() (string, error) {
-    for _, cluster := range c.Clusters {
-        cluster.NodePools = append(cluster.NodePools, NodePool{
-            Count: cluster.Etcd.Count,
-            Name:  "etcd",
-        })
-        cluster.Etcd.NodePools = []string{"etcd"}
-
-        cluster.NodePools = append(cluster.NodePools, NodePool{
-            Count: cluster.Master.Count,
-            Name:  "master",
-        })
-        cluster.Master.NodePools = []string{"master"}
-
-        cluster.NodePools = append(cluster.NodePools, NodePool{
-            Count: cluster.Worker.Count,
-            Name:  "worker",
-        })
-        cluster.Worker.NodePools = []string{"worker"}
-    }
-
-    yaml, err := yaml.Marshal(c)
-    if err != nil {
-        return "", err
-    }
-
-    return string(yaml), nil
-}
@@ -1,39 +1,24 @@
 package config

 import (
-    "errors"
     "io/ioutil"

     "gopkg.in/yaml.v2"
 )

-// Error codes returned by failures to parse a config.
-var (
-    ErrMultipleClusters = errors.New("multiple cluster configurations are not currently supported")
-    ErrNoClusters       = errors.New("no clusters were defined")
-)
-
-// ParseConfig parses a yaml string and returns, if successful, a Config.
-func ParseConfig(data []byte) (*Config, error) {
-    config := &Config{}
-
-    if err := yaml.Unmarshal(data, config); err != nil {
-        return nil, err
-    }
-
-    if len(config.Clusters) == 0 {
-        return config, ErrNoClusters
-    }
-
-    if len(config.Clusters) > 1 {
-        return config, ErrMultipleClusters
-    }
-
-    return config, nil
+// ParseConfig parses a yaml string and returns, if successful, a Cluster.
+func ParseConfig(data []byte) (*Cluster, error) {
+    cluster := &Cluster{}
+
+    if err := yaml.Unmarshal(data, cluster); err != nil {
+        return nil, err
+    }
+
+    return cluster, nil
 }

-// ParseConfigFile parses a yaml file and returns, if successful, a Config.
-func ParseConfigFile(path string) (*Config, error) {
+// ParseConfigFile parses a yaml file and returns, if successful, a Cluster.
+func ParseConfigFile(path string) (*Cluster, error) {
     data, err := ioutil.ReadFile(path)
     if err != nil {
         return nil, err
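For the byte-slice entry point, a small sketch of the rewritten parser in use; the inline YAML document is a made-up minimal single-cluster config:

package main

import (
    "fmt"
    "log"

    "github.com/coreos/tectonic-installer/installer/pkg/config"
)

func main() {
    // A single cluster document with no `clusters:` list wrapper.
    doc := []byte("name: test\nplatform: AWS\nbaseDomain: cluster.com\n")

    // ParseConfig unmarshals straight into a Cluster; the old ErrNoClusters
    // and ErrMultipleClusters checks no longer exist.
    cluster, err := config.ParseConfig(doc)
    if err != nil {
        log.Fatalf("failed to parse config: %v", err)
    }
    fmt.Println("parsed cluster:", cluster.Name)
}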
@@ -97,6 +97,7 @@ type Worker struct {
     NodePools []string `json:"-" yaml:"nodePools"`
 }

+// Internal converts internal related config.
 type Internal struct {
     ClusterID string `json:"tectonic_cluster_id,omitempty" yaml:"clusterId"`
 }
@@ -32,11 +32,7 @@ func readTFVarsConfigStep(m *metadata) error {
 }

 func printYAMLConfigStep(m *metadata) error {
-    config := config.Config{
-        Clusters: []config.Cluster{m.cluster},
-    }
-
-    yaml, err := config.YAML()
+    yaml, err := m.cluster.YAML()
     if err != nil {
         return err
     }
@@ -1,56 +1,55 @@
-clusters:
-- admin:
+admin:
  email: null
  password: null
aws:
  etcd:
    ec2Type: m4.large
    rootVolume:
      iops: 100
      size: 30
      type: gp2
  master:
    ec2Type: m4.large
    rootVolume:
      iops: 100
      size: 30
      type: gp2
  region: null
  sshKey: null
  vpcCIDRBlock: 10.0.0.0/16
  worker:
    ec2Type: m4.large
    rootVolume:
      iops: 100
      size: 30
      type: gp2
baseDomain: tectonic-ci.de
containerLinux:
  channel: beta
  version: latest
etcd:
  nodePools:
  - etcd
licensePath:
master:
  nodePools:
  - master
name: aws-basic
networking:
  mtu: 1480
  podCIDR: 10.2.0.0/16
  serviceCIDR: 10.3.0.0/16
  type: canal
nodePools:
- name: etcd
  count: 3
- name: master
  count: 2
- name: worker
  count: 3
platform: AWS
pullSecretPath:
tlsValidityPeriod: 26280
worker:
  nodePools:
  - worker
@@ -39,11 +39,11 @@ func buildInternalStep(m *metadata) error {
     }

     // fill the internal struct
-    clusterId, err := configgenerator.GenerateClusterID(16)
+    clusterID, err := configgenerator.GenerateClusterID(16)
     if err != nil {
         return err
     }
-    m.cluster.Internal.ClusterID = clusterId
+    m.cluster.Internal.ClusterID = clusterID

     // store the content
     yamlContent, err := yaml.Marshal(m.cluster.Internal)
@@ -81,9 +81,5 @@ func prepareWorspaceStep(m *metadata) error {
     }

     configFilePath := filepath.Join(m.clusterDir, configFileName)
-    if err := copyFile(m.configFilePath, configFilePath); err != nil {
-        return err
-    }
-
-    return nil
+    return copyFile(m.configFilePath, configFilePath)
 }
@@ -17,10 +17,10 @@ func initTestCluster(file string) (*config.Cluster, error) {
     if err != nil {
         return nil, fmt.Errorf("failed to parse test config: %v", err)
     }
-    if len((&testConfig.Clusters[0]).Validate()) != 0 {
+    if len(testConfig.Validate()) != 0 {
         return nil, errors.New("failed to validate test conifg")
     }
-    return &testConfig.Clusters[0], nil
+    return testConfig, nil
 }

 func TestGenerateTerraformVariablesStep(t *testing.T) {
@@ -141,10 +141,10 @@ func readClusterConfig(configFilePath string, internalFilePath string) (*config.
     if err != nil {
         return nil, fmt.Errorf("%s is not a valid internal file: %s", internalFilePath, err)
     }
-        cfg.Clusters[0].Internal = *internal
+        cfg.Internal = *internal
     }

-    return &cfg.Clusters[0], nil
+    return cfg, nil
 }

 func readClusterConfigStep(m *metadata) error {
@@ -18,7 +18,7 @@ class ConfigFile
   end

   def networking
-    data['clusters'][0]['networking']['type']
+    data['networking']['type']
   end

   def node_count
@@ -26,79 +26,79 @@ class ConfigFile
   end

   def master_count
-    get_node_count(data['clusters'][0]['master']['nodePools'])
+    get_node_count(data['master']['nodePools'])
   end

   def worker_count
-    get_node_count(data['clusters'][0]['worker']['nodePools'])
+    get_node_count(data['worker']['nodePools'])
   end

   def etcd_count
-    get_node_count(data['clusters'][0]['etcd']['nodePools'])
+    get_node_count(data['etcd']['nodePools'])
   end

   def add_worker_node(node_count)
-    new_data = set_node_count(data['clusters'][0]['worker']['nodePools'][0], node_count)
+    new_data = set_node_count(data['worker']['nodePools'][0], node_count)
     save(new_data)
   end

   def change_cluster_name(cluster_name)
     new_data = data
-    new_data['clusters'][0]['name'] = cluster_name
+    new_data['name'] = cluster_name
     save(new_data)
   end

   def cluster_name
-    data['clusters'][0]['name']
+    data['name']
   end

   def change_aws_region(region)
     new_data = data
-    new_data['clusters'][0]['aws']['region'] = region
+    new_data['aws']['region'] = region
     save(new_data)
   end

   def region(platform)
-    data['clusters'][0][platform]['region']
+    data[platform]['region']
   end

   def change_license(license_path)
     new_data = data
-    new_data['clusters'][0]['licensePath'] = license_path
+    new_data['licensePath'] = license_path
     save(new_data)
   end

   def change_pull_secret(pull_secret_path)
     new_data = data
-    new_data['clusters'][0]['pullSecretPath'] = pull_secret_path
+    new_data['pullSecretPath'] = pull_secret_path
     save(new_data)
   end

   def change_base_domain(base_domain)
     new_data = data
-    new_data['clusters'][0]['baseDomain'] = base_domain
+    new_data['baseDomain'] = base_domain
     save(new_data)
   end

   def license
-    data['clusters'][0]['licensePath']
+    data['licensePath']
   end

   def pull_secret
-    data['clusters'][0]['pullSecretPath']
+    data['pullSecretPath']
   end

   def change_admin_credentials(admin_email, admin_passwd)
     new_data = data
-    new_data['clusters'][0]['admin'] ||= {}
-    new_data['clusters'][0]['admin']['email'] = admin_email
-    new_data['clusters'][0]['admin']['password'] = admin_passwd
+    new_data['admin'] ||= {}
+    new_data['admin']['email'] = admin_email
+    new_data['admin']['password'] = admin_passwd
     save(new_data)
   end

   def admin_credentials
-    admin_email = data.dig('clusters', 0, 'admin', 'email')
-    admin_passwd = data.dig('clusters', 0, 'admin', 'password')
+    admin_email = data.dig('admin', 'email')
+    admin_passwd = data.dig('admin', 'password')
     [admin_email, admin_passwd]
   end
@@ -110,13 +110,13 @@ class ConfigFile
   def change_ssh_key(platform, ssh_key)
     new_data = data
-    new_data['clusters'][0][platform]['sshKey'] = ssh_key
+    new_data[platform]['sshKey'] = ssh_key
     save(new_data)
   end

   def platform
     PLATFORMS.each do |plat|
-      return plat if data['clusters'][0]['platform'].downcase.eql?(plat)
+      return plat if data['platform'].downcase.eql?(plat)
     end
   end
@@ -135,7 +135,7 @@ class ConfigFile
   def get_node_count(names)
     count = 0
     names.each do |name|
-      data['clusters'][0]['nodePools'].each do |n|
+      data['nodePools'].each do |n|
         count += n['count'] if n['name'] == name
       end
     end
@@ -144,7 +144,7 @@ class ConfigFile
   def set_node_count(name, count)
     d = data
-    d['clusters'][0]['nodePools'].each do |n|
+    d['nodePools'].each do |n|
       if n['name'] == name
         n['count'] = count
         break
@@ -1,47 +1,46 @@
-clusters:
-- aws:
+aws:
  etcd:
    ec2Type: m4.large
    iamRoleName: tf-tectonic-etcd-node
    rootVolume:
      size: 32
      type: gp2
  master:
    ec2Type: m4.large
    iamRoleName: tf-tectonic-master-node
    rootVolume:
      size: 32
      type: gp2
  vpcCIDRBlock: 10.0.0.0/16
  worker:
    ec2Type: m4.large
    iamRoleName: tf-tectonic-worker-node
    rootVolume:
      size: 32
      type: gp2
baseDomain: tectonic-ci.de
containerLinux:
  channel: beta
etcd:
  nodePools:
  - etcd
master:
  nodePools:
  - master
name: aws-basic
networking:
  mtu: 1480
  podCIDR: 10.2.0.0/16
  serviceCIDR: 10.3.0.0/16
  type: canal
nodePools:
- count: 3
  name: etcd
- count: 2
  name: master
- count: 3
  name: worker
platform: AWS
worker:
  nodePools:
  - worker
@@ -1,113 +1,112 @@
-clusters:
-- aws:
+aws:
  etcd:
    ec2Type: m4.large
  master:
    ec2Type: m4.large
  vpcCIDRBlock: 10.0.0.0/16
  worker:
    ec2Type: m4.large
ca:
  cert: |
    -----BEGIN CERTIFICATE-----
    MIIFDTCCAvWgAwIBAgIJAIuXq10k2OFlMA0GCSqGSIb3DQEBCwUAMBIxEDAOBgNV
    BAMMB2Zha2UtY2EwHhcNMTcwMjAxMjIxMzI0WhcNMjcwMTMwMjIxMzI0WjASMRAw
    DgYDVQQDDAdmYWtlLWNhMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA
    zzHsB56F6oZjsVBKzfpicsG+mVHQ/QzA4jqRCbQ8Zr12NtUZKnPUVwDoFf4WTfmy
    Z0u8Uv+6/B/8un3LGsIaJEugPfRboc2oZKJcqfMJSFfLb/wkmT0D/1HJR60ml/M5
    wpHeh4vQ7BhktNsK90EjdlLvr1GDfevXArnye5ksEInOSX9nXVsGPrm0AGSffhmY
    uUAjY8f9IspJa1j4vL6NI89GWO4jqME+SUnuI4SYIkuQJoSElofAIX2b5Tk3dFya
    VKmAq2L89teCMYsciPbFa/Z2HvDNZ7pC17Ow7zr1f+V5BU18h3cLk610YNPcEBw0
    f94+mePsmMSMjUM0f+NMFyDERF+pys60/3qqVWrJe/FkJM6NDCyWXXXAfTxIwLq0
    CVrlWALdTc+RMAPI2sxAdUp4BqAuek4SjIg3FuoJrBs3EAUPfybclJ7g3HJwyXM2
    3WIe10BnSk+rGzd4KMVbYw5/nM8Nc/Y20R2an/vVZn6xTxs9o6hhEHF7d5iws6Bi
    7/jv+jdZhLG8b3sG6Tj7a7YdvKWqH/mSPFlc/sevYOjR7NKYRMwGnl0d9qf+Xe5V
    xyH1llIXPs6+y1B4tRyL/tulyeVqi25+I4QVAYypxWU8CPyw7tsSdOsSTbeGTmXj
    ehelY/BCjAqAcexL7oRV7dy7VZ1Ezg6zQRwMt0Tar90CAwEAAaNmMGQwHQYDVR0O
    BBYEFNGPoXTjJnHjG2zMpjSg/9vNO/trMB8GA1UdIwQYMBaAFNGPoXTjJnHjG2zM
    pjSg/9vNO/trMBIGA1UdEwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGGMA0G
    CSqGSIb3DQEBCwUAA4ICAQC9V/0iiEZYHz7xbezHpeGHwmecH5oylEvAeCcN10gx
    HFvUN+XMyBaPqN7iRtx/rSqyp2iN2AK1Cdn1viOSRc09lwPiuj9V4diSDyPwJWxd
    60gqd5E9F9gQXlenWoIdm7kW8Lo8HLfx8ItYKGpE51JUctTmGY5WURRmBlVKr1LA
    hbVsAWBaGQfPyW1CrFcxxc5mCABxWOxjRjLw8A8c5IXD0Q5C5pRd0BckBHKTdl40
    owm893oPEQcu/1C432T4vIddVh1Ktq1pd7O/9BPYOaPryzf7076xSwZ0bSuBUGRq
    Vd3STfu5QRqpMv4dIrhqRofmIUzjOHLRX8Lx2pzgYcMgMQ8O+jM+ETrYD6rsDoLQ
    uiVSWZK0YFndKzNTA04u57arRumWKqqfS0kkDFayumyv6KaDS6YZdsqSRmaiLAOG
    F6jchpUtkDhDY0v/Y7jESUneT0hRnqNMPAKJMNhE4hS+1qkcP/ikQQgZl/OWma1z
    HUyBGT4OGP2T3JIfq12Z4vC5FGVD4aD/frTvPMlifV3i8lKlYZs271JPXUo6ASIA
    ZSBpV5QilOlE25Q5Lcw0yWmN4KwxqBL9bJ5W9D1I0qhWxaMF78m+8vLIFv+dAylE
    Od27a+1We/P5ey7WRlwCfuEcFV7nYS/qMykYdQ9fxHSPgTPlrGrSwKstaaIIqOkE
    kA==
    -----END CERTIFICATE-----
  key: |-
    -----BEGIN RSA PRIVATE KEY-----
    MIIJKgIBAAKCAgEAzzHsB56F6oZjsVBKzfpicsG+mVHQ/QzA4jqRCbQ8Zr12NtUZ
    KnPUVwDoFf4WTfmyZ0u8Uv+6/B/8un3LGsIaJEugPfRboc2oZKJcqfMJSFfLb/wk
    mT0D/1HJR60ml/M5wpHeh4vQ7BhktNsK90EjdlLvr1GDfevXArnye5ksEInOSX9n
    XVsGPrm0AGSffhmYuUAjY8f9IspJa1j4vL6NI89GWO4jqME+SUnuI4SYIkuQJoSE
    lofAIX2b5Tk3dFyaVKmAq2L89teCMYsciPbFa/Z2HvDNZ7pC17Ow7zr1f+V5BU18
    h3cLk610YNPcEBw0f94+mePsmMSMjUM0f+NMFyDERF+pys60/3qqVWrJe/FkJM6N
    DCyWXXXAfTxIwLq0CVrlWALdTc+RMAPI2sxAdUp4BqAuek4SjIg3FuoJrBs3EAUP
    fybclJ7g3HJwyXM23WIe10BnSk+rGzd4KMVbYw5/nM8Nc/Y20R2an/vVZn6xTxs9
    o6hhEHF7d5iws6Bi7/jv+jdZhLG8b3sG6Tj7a7YdvKWqH/mSPFlc/sevYOjR7NKY
    RMwGnl0d9qf+Xe5VxyH1llIXPs6+y1B4tRyL/tulyeVqi25+I4QVAYypxWU8CPyw
    7tsSdOsSTbeGTmXjehelY/BCjAqAcexL7oRV7dy7VZ1Ezg6zQRwMt0Tar90CAwEA
    AQKCAgEAjH2XQ9tThqC1fIerEVvT4WhJ6wA1K0C4kS2RJvlVc3zIaYm5VLXRp2Tv
    +emeCiVjuPL7sXPBwC+YWIPvcidnPnEhKKFGeMJQilwlZP9srecKBNb9ogJjcX5t
    cvKPlrzPz4TFVTeS5GPt9UwJdXpvp025RDGLbZi65BhduT01ScmHXQLMfdq4s1OM
    IDAajZChpAs/c+spU6vCeM2Na73xSfTECI0BFO5jY6KDnQXNeoOuLM/yb3eA6bSY
    Pqe7WGVqKDn/CzdFu8KJfzqKkLxzRS+LDJPPU6RSqpwnPy/FQ4G/u768z8YCzZHx
    ta4yK6JUXte9ru+DgFrVyvtk38qpzlNYj5PVPkZxOZaWPALYAa6N53/NSJIZ/Pm6
    YaLkncTbpjer0zzEULfEngiHl8e8XrySeirmIZ7W1RPVA/k0f4d9rOVxhvtNM4es
    WaEvCMxC1BOD5e7fX39hI4xjFNjecFSXPLR9RlbTxg0yQAjDfMJYghdNfUgfd8I0
    QP9UmSdLiUcCWJlZ5uF0UG+HNxcp/ML1z7GTLxYjuqC1gLA1giMD8Y7zJJbIsRKt
    8ymtlkqoTkO+AMnQ0/Eno2yQ9ed7+guhYdpLuEvH2f+p5yEVtcrYmE/tiombs9Gq
    twVTeSvmm8uLygQIdI0QeKnRjoM9qX9+5I1EkloB7vhTXSAQ+oECggEBAPuDyEgq
    B2etxpvveDKOatuRimC+oWQ7eyp6NA8BOaHw+1OgTGPE+807i4/RPlhT9pbTuG16
    /unH8PnRXijtYEeQdFck9TCYjqwJlThZdokg30g827U6K+UECqd5ffejx9cnRpxu
    Uke+AfMLdzG7G3EJlGoG76JZyKmow1JKzPhL7qa9YQRWA9dxC+vMjKakWf2Y8OSq
    tkukYRpbn7VC99v5J8vJsNVFXu419N7h0bj2yQ6t64N/ybPWVfjp5xzc9rsNN14f
    j1HoeqX/xw3MSUMjJol1L1V6+kHBhws0JsWFnGma+LTnDj4RE8HzohTlQv7Bhsgz
    2qlW3gizrQEn+FECggEBANLjz4SX0eYF37pbZ1XReqezb9LP8oXiHn8UZpKlRpcF
    DmaoSa9vcEySjwEq3oiR3Nzny46zLfAAJ7O3K2TI4AS/zcTmQw2G4+WjRf8tTq2x
    A8SNq5E6p5bbimJC+80cVVfFAGukeQy9149ZW4ldfYTrk+821o5lBXmo9EqyrbqI
    Nrt/EezSHr/Yai9zSV/VnLZ27nvW7vFlNHqbMGwhTHBY8eX6SEdIsobdjsdGdrUn
    i331ImodBJ5/3H6OdNGHUbrizzn8Jm8CgZHkA87a9ON8eKHQ/FOb/Md82qDghnQR
    LfBcoOac357Nprc4F/YGE4MCjXgLmGrgzMkQ0Fwcp80CggEBAM7fwQ/iSf70R2Uh
    XhsvWyNInaofgj4gUplItKMW3eGehgpt0gdKEdboQE3FzOL4BN5gPNUIEr4Vr9a7
    aBh/zu5uGdNH2cjj4o4Mv8j+hOobuKwBKrHwrAQOA/lmi77x3sDQVFr8vv61gYL4
    jkzAWrzqJUHkfJxr/wnVfvqj/d3JDv3kzPS1DynYmPaVY6b5je9yKcnbxF+JUDlO
    3ZlJAPfVAu+y8JkrGv8SMFxXH5pkmlFRqmKZ7DzYchRvx6HM+cA3CbCIgujbMG5z
    aLWnrybitaLgWVOU+Fy3oq0Lc0yKLnIKfsDFP8i7YSXpkAph3G4Qnhzz0cnxYmWD
    7CwERVECggEAaFVKalfOAVXwnLrxwbRUUTll3k8AthnraoWGRZC8/qQCvukNI10n
    msp7M2GpHLnFIgkPXPbqiC0bdz7smf0DT3Yw7/PXQo70mryPObKJlUbZDVnlgoEZ
    Pno42Wo4Nv6Ifla5YYfKV3JofcQAlFILckI2OwfPWD1EWy8qRPZnGryfD13LWXWO
    vuzrg7QundoJoP/v9pacOhMOxoWWjDhhH8fxTQzoy1N891oPdCk5O2BoE5W+Q+89
    RMkPJhGGW87tsV7alN5ZiVwdDDdZZvJOa2k+KRhCbX7jrTHo2+SYwD1rk9nPxKfh
    vigSDd0ThaT17D/MC5L5Ag9bYTIPUzLeFQKCAQEAq6RjI5A3Xppq9OFziOjgrUGv
    2/xIH1NH7hdqGk5V+QRYQdD7Vd9wnF4f0CpIYTR55Mcud4amL3mHcR2IdjhJ4wcL
    0VnSghllTdzO9dcDQ3cigIkzdikGoC+xPQRXMpt3sWS3BYyJZmjsUG1+TgPOZZeb
    DInfb96I9euapu9meSrwzYy7R21eFfmVqqIaVkDv4fYfUiJZoSA9JygYul3jMt4p
    rS7cdWaDaR/EX3aTA1S9S331CFwhzRYC5cj6t+Qz5SIH9czmHFH6STfIvYtkxGvC
    GtROM9ZeDkO+/LwKbQlkbjuazbPCSWy5/163bPbK1w7PA2Ae7jMaJYtm3wYzQQ==
    -----END RSA PRIVATE KEY-----
  keyAlg: RSA
containerLinux:
  channel: beta
etcd:
  nodePools:
  - etcd
master:
  nodePools:
  - master
|
||||
Pno42Wo4Nv6Ifla5YYfKV3JofcQAlFILckI2OwfPWD1EWy8qRPZnGryfD13LWXWO
|
||||
vuzrg7QundoJoP/v9pacOhMOxoWWjDhhH8fxTQzoy1N891oPdCk5O2BoE5W+Q+89
|
||||
RMkPJhGGW87tsV7alN5ZiVwdDDdZZvJOa2k+KRhCbX7jrTHo2+SYwD1rk9nPxKfh
|
||||
vigSDd0ThaT17D/MC5L5Ag9bYTIPUzLeFQKCAQEAq6RjI5A3Xppq9OFziOjgrUGv
|
||||
2/xIH1NH7hdqGk5V+QRYQdD7Vd9wnF4f0CpIYTR55Mcud4amL3mHcR2IdjhJ4wcL
|
||||
0VnSghllTdzO9dcDQ3cigIkzdikGoC+xPQRXMpt3sWS3BYyJZmjsUG1+TgPOZZeb
|
||||
DInfb96I9euapu9meSrwzYy7R21eFfmVqqIaVkDv4fYfUiJZoSA9JygYul3jMt4p
|
||||
rS7cdWaDaR/EX3aTA1S9S331CFwhzRYC5cj6t+Qz5SIH9czmHFH6STfIvYtkxGvC
|
||||
GtROM9ZeDkO+/LwKbQlkbjuazbPCSWy5/163bPbK1w7PA2Ae7jMaJYtm3wYzQQ==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
keyAlg: RSA
|
||||
containerLinux:
|
||||
channel: beta
|
||||
etcd:
|
||||
nodePools:
|
||||
- etcd
|
||||
master:
|
||||
nodePools:
|
||||
- master
|
||||
nodePools:
|
||||
- count: 3
|
||||
name: etcd
|
||||
- count: 2
|
||||
name: master
|
||||
- count: 2
|
||||
name: worker
|
||||
platform: AWS
|
||||
worker:
|
||||
nodePools:
|
||||
- worker
|
||||
platform: AWS
|
||||
worker:
|
||||
nodePools:
|
||||
- worker
|
||||
|
||||
@@ -1,23 +1,22 @@
clusters:
- aws:
etcd:
ec2Type: m4.large
master:
ec2Type: m4.large
vpcCIDRBlock: 10.0.0.0/16
worker:
ec2Type: m4.large
containerLinux:
channel: beta
aws:
etcd:
ec2Type: m4.large
master:
nodepools:
- master
nodePools:
ec2Type: m4.large
vpcCIDRBlock: 10.0.0.0/16
worker:
ec2Type: m4.large
containerLinux:
channel: beta
master:
nodepools:
- master
nodePools:
- count: 1
name: master
- count: 2
name: worker
platform: AWS
worker:
nodePools:
- worker
platform: AWS
worker:
nodePools:
- worker

@@ -1,25 +1,24 @@
clusters:
- aws:
etcd:
ec2Type: m4.large
master:
ec2Type: m4.large
vpcCIDRBlock: 10.0.0.0/16
worker:
ec2Type: m4.large
containerLinux:
channel: beta
aws:
etcd:
ec2Type: m4.large
master:
nodepools:
- master
networking:
type: flannel
nodePools:
ec2Type: m4.large
vpcCIDRBlock: 10.0.0.0/16
worker:
ec2Type: m4.large
containerLinux:
channel: beta
master:
nodepools:
- master
networking:
type: flannel
nodePools:
- count: 1
name: master
- count: 2
name: worker
platform: AWS
worker:
nodePools:
- worker
platform: AWS
worker:
nodePools:
- worker

@@ -1,28 +1,27 @@
clusters:
- aws:
etcd:
ec2Type: m4.large
master:
ec2Type: m4.large
vpcCIDRBlock: 10.0.0.0/16
worker:
ec2Type: m4.large
containerLinux:
channel: beta
aws:
etcd:
nodePools:
- etcd
ec2Type: m4.large
master:
nodepools:
- master
ec2Type: m4.large
vpcCIDRBlock: 10.0.0.0/16
worker:
ec2Type: m4.large
containerLinux:
channel: beta
etcd:
nodePools:
- etcd
master:
nodepools:
- master
nodePools:
- count: 1
name: etcd
- count: 1
name: master
- count: 2
name: worker
platform: AWS
worker:
nodePools:
- worker
platform: AWS
worker:
nodePools:
- worker

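The hunks above all apply the same structural change described by this commit: the single-element clusters: list wrapper is dropped, so each test fixture's cluster definition moves to the top level of the YAML file. A minimal before/after sketch of that reshaping, using field names taken from the hunks (the indentation here is illustrative, since the flattened diff view above does not preserve it):

Before:
    clusters:
    - platform: AWS
      aws:
        vpcCIDRBlock: 10.0.0.0/16

After:
    platform: AWS
    aws:
      vpcCIDRBlock: 10.0.0.0/16

The keys themselves (aws, containerLinux, master, nodePools, platform, worker, and so on) are unchanged; only the list nesting under clusters: is removed.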