Mirror of https://github.com/openshift/openshift-ansible-contrib.git (synced 2026-02-05 09:45:58 +01:00)

Commit: merging rhsm-repos
@@ -9,6 +9,9 @@ This repository contains *unsupported* code that can be used in conjunction with

- code for provisioning various cloud providers ([GCP](https://github.com/openshift/openshift-ansible-contrib/tree/master/reference-architecture/gcp), [AWS](https://github.com/openshift/openshift-ansible-contrib/tree/master/reference-architecture/aws-ansible), [VMware](https://github.com/openshift/openshift-ansible-contrib/tree/master/reference-architecture/vmware-ansible), [Azure](https://github.com/openshift/openshift-ansible-contrib/tree/master/reference-architecture/azure-ansible), [OpenStack](https://github.com/openshift/openshift-ansible-contrib/tree/master/playbooks/provisioning/openstack) and [Red Hat Virtualization (RHV) / oVirt](https://github.com/openshift/openshift-ansible-contrib/tree/master/reference-architecture/rhv-ansible))
- supporting scripts and playbooks for the various [reference architectures](https://github.com/openshift/openshift-ansible-contrib/tree/master/reference-architecture) Red Hat has published

**NOTE: Some repositories containing scripts and ansible playbooks are deprecated.**

## Contributing

If you're submitting a pull request or doing a code review, please
@@ -1 +1 @@
/usr/share/ansible/openshift-ansible/library/rpm_q.py
/usr/share/ansible/openshift-ansible/roles/lib_utils/library/rpm_q.py

reference-architecture/3.9/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# OpenShift 3.9 on AWS
For more information on how to use these playbooks, visit the reference architecture document:
https://access.redhat.com/documentation/en-us/reference_architectures/2018/html/deploying_and_managing_openshift_3.9_on_amazon_web_services/
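The playbooks referenced here live under reference-architecture/3.9/playbooks (added later in this commit). A minimal invocation sketch, assuming vars/main.yaml has been filled in for your account; the authoritative procedure is the reference architecture document above:

```bash
# Sketch only -- follow the reference architecture document for the supported steps.
cd reference-architecture/3.9/playbooks
ansible-playbook deploy_aws.yaml        # provision VPC, subnets, ELBs, EC2 instances, Route53 records
ansible-playbook deploy_aws_cns.yaml    # optionally add CNS (container-native storage) nodes
```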
@@ -1,218 +0,0 @@
|
||||
if [ ! "$ocp_ec2_bastion" ]; then
|
||||
export ocp_ec2_bastion=$(aws ec2 run-instances \
|
||||
--image-id ${ocp_ec2ami[1]} \
|
||||
--count 1 \
|
||||
--instance-type $ocp_ec2_bastion_type \
|
||||
--key-name $(echo $ocp_keypair | jq -r '.KeyName') \
|
||||
--security-group-ids $(echo $ocp_awssg_bastion | jq -r '.GroupId') \
|
||||
--subnet-id $(echo $ocp_subnet1_routing | jq -r '.Subnet.SubnetId') \
|
||||
--associate-public-ip-address \
|
||||
--block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=bastion},{Key=Clusterid,Value=$ocp_clusterid}]" \
|
||||
)
|
||||
sleep 30
|
||||
export ocp_ec2_bastioneipassc=$(aws ec2 associate-address \
|
||||
--allocation-id $(echo $ocp_eip0 | jq -r '.AllocationId') \
|
||||
--instance-id $(echo $ocp_ec2_bastion | jq -r '.Instances[].InstanceId'))
|
||||
fi
|
||||
if [ ! "$ocp_ec2_master1" ]; then
|
||||
export ocp_ec2_master1=$(aws ec2 run-instances \
|
||||
--image-id ${ocp_ec2ami[1]} \
|
||||
--count 1 \
|
||||
--instance-type $ocp_ec2_master_type \
|
||||
--key-name $(echo $ocp_keypair | jq -r '.KeyName') \
|
||||
--security-group-ids $(echo $ocp_awssg_master | jq -r '.GroupId') \
|
||||
--subnet-id $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') \
|
||||
--block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=master1},{Key=Clusterid,Value=$ocp_clusterid}]" \
|
||||
)
|
||||
fi
|
||||
if [ ! "$ocp_ec2_master2" ]; then
|
||||
export ocp_ec2_master2=$(aws ec2 run-instances \
|
||||
--image-id ${ocp_ec2ami[1]} \
|
||||
--count 1 \
|
||||
--instance-type $ocp_ec2_master_type \
|
||||
--key-name $(echo $ocp_keypair | jq -r '.KeyName') \
|
||||
--security-group-ids $(echo $ocp_awssg_master | jq -r '.GroupId') \
|
||||
--subnet-id $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') \
|
||||
--block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=master2},{Key=Clusterid,Value=$ocp_clusterid}]" \
|
||||
)
|
||||
fi
|
||||
if [ ! "$ocp_ec2_master3" ]; then
|
||||
export ocp_ec2_master3=$(aws ec2 run-instances \
|
||||
--image-id ${ocp_ec2ami[1]} \
|
||||
--count 1 \
|
||||
--instance-type $ocp_ec2_master_type \
|
||||
--key-name $(echo $ocp_keypair | jq -r '.KeyName') \
|
||||
--security-group-ids $(echo $ocp_awssg_master | jq -r '.GroupId') \
|
||||
--subnet-id $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') \
|
||||
--block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=master3},{Key=Clusterid,Value=$ocp_clusterid}]" \
|
||||
)
|
||||
fi
|
||||
if [ "$ocp_ec2_master1" ] && [ "$ocp_ec2_master2" ] && [ "$ocp_ec2_master3" ]; then
|
||||
export ocp_elb_masterextreg=$(aws elb register-instances-with-load-balancer \
|
||||
--load-balancer-name $ocp_clusterid-master-external \
|
||||
--instances \
|
||||
$(echo $ocp_ec2_master1 | jq -r '.Instances[].InstanceId') \
|
||||
$(echo $ocp_ec2_master2 | jq -r '.Instances[].InstanceId') \
|
||||
$(echo $ocp_ec2_master3 | jq -r '.Instances[].InstanceId') \
|
||||
)
|
||||
export ocp_elb_masterintreg=$(aws elb register-instances-with-load-balancer \
|
||||
--load-balancer-name $ocp_clusterid-master-internal \
|
||||
--instances \
|
||||
$(echo $ocp_ec2_master1 | jq -r '.Instances[].InstanceId') \
|
||||
$(echo $ocp_ec2_master2 | jq -r '.Instances[].InstanceId') \
|
||||
$(echo $ocp_ec2_master3 | jq -r '.Instances[].InstanceId') \
|
||||
)
|
||||
fi
|
||||
|
||||
if [ ! "$ocp_ec2_infra1" ]; then
|
||||
export ocp_ec2_infra1=$(aws ec2 run-instances \
|
||||
--image-id ${ocp_ec2ami[1]} \
|
||||
--count 1 \
|
||||
--instance-type $ocp_ec2_infra_type \
|
||||
--key-name $(echo $ocp_keypair | jq -r '.KeyName') \
|
||||
--security-group-ids $(echo $ocp_awssg_infra | jq -r '.GroupId') \
|
||||
--subnet-id $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') \
|
||||
--block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=infra1},{Key=Clusterid,Value=$ocp_clusterid}]" \
|
||||
)
|
||||
fi
|
||||
if [ ! "$ocp_ec2_infra2" ]; then
|
||||
export ocp_ec2_infra2=$(aws ec2 run-instances \
|
||||
--image-id ${ocp_ec2ami[1]} \
|
||||
--count 1 \
|
||||
--instance-type $ocp_ec2_infra_type \
|
||||
--key-name $(echo $ocp_keypair | jq -r '.KeyName') \
|
||||
--security-group-ids $(echo $ocp_awssg_infra | jq -r '.GroupId') \
|
||||
--subnet-id $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') \
|
||||
--block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=infra2},{Key=Clusterid,Value=$ocp_clusterid}]" \
|
||||
)
|
||||
fi
|
||||
if [ ! "$ocp_ec2_infra3" ]; then
|
||||
export ocp_ec2_infra3=$(aws ec2 run-instances \
|
||||
--image-id ${ocp_ec2ami[1]} \
|
||||
--count 1 \
|
||||
--instance-type $ocp_ec2_infra_type \
|
||||
--key-name $(echo $ocp_keypair | jq -r '.KeyName') \
|
||||
--security-group-ids $(echo $ocp_awssg_infra | jq -r '.GroupId') \
|
||||
--subnet-id $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') \
|
||||
--block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=infra3},{Key=Clusterid,Value=$ocp_clusterid}]" \
|
||||
)
|
||||
fi
|
||||
if [ "$ocp_ec2_infra1" ] && [ "$ocp_ec2_infra2" ] && [ "$ocp_ec2_infra3" ]; then
|
||||
export ocp_elb_infrareg=$(aws elb register-instances-with-load-balancer \
|
||||
--load-balancer-name $ocp_clusterid-infra-external \
|
||||
--instances \
|
||||
$(echo $ocp_ec2_infra1 | jq -r '.Instances[].InstanceId') \
|
||||
$(echo $ocp_ec2_infra2 | jq -r '.Instances[].InstanceId') \
|
||||
$(echo $ocp_ec2_infra3 | jq -r '.Instances[].InstanceId') \
|
||||
)
|
||||
export ocp_elb_infrareg=$(aws elb register-instances-with-load-balancer \
|
||||
--load-balancer-name $ocp_clusterid-infra-internal \
|
||||
--instances \
|
||||
$(echo $ocp_ec2_infra1 | jq -r '.Instances[].InstanceId') \
|
||||
$(echo $ocp_ec2_infra2 | jq -r '.Instances[].InstanceId') \
|
||||
$(echo $ocp_ec2_infra3 | jq -r '.Instances[].InstanceId') \
|
||||
)
|
||||
fi
|
||||
if [ ! "$ocp_ec2_node1" ]; then
  export ocp_ec2_node1=$(aws ec2 run-instances \
    --image-id ${ocp_ec2ami[1]} \
    --count 1 \
    --instance-type $ocp_ec2_node_type \
    --key-name $(echo $ocp_keypair | jq -r '.KeyName') \
    --security-group-ids $(echo $ocp_awssg_node | jq -r '.GroupId') \
    --subnet-id $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') \
    --block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
    --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=node1},{Key=Clusterid,Value=$ocp_clusterid}]" \
  )
fi
if [ ! "$ocp_ec2_node2" ]; then
  export ocp_ec2_node2=$(aws ec2 run-instances \
    --image-id ${ocp_ec2ami[1]} \
    --count 1 \
    --instance-type $ocp_ec2_node_type \
    --key-name $(echo $ocp_keypair | jq -r '.KeyName') \
    --security-group-ids $(echo $ocp_awssg_node | jq -r '.GroupId') \
    --subnet-id $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') \
    --block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
    --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=node2},{Key=Clusterid,Value=$ocp_clusterid}]" \
  )
fi
if [ ! "$ocp_ec2_node3" ]; then
  export ocp_ec2_node3=$(aws ec2 run-instances \
    --image-id ${ocp_ec2ami[1]} \
    --count 1 \
    --instance-type $ocp_ec2_node_type \
    --key-name $(echo $ocp_keypair | jq -r '.KeyName') \
    --security-group-ids $(echo $ocp_awssg_node | jq -r '.GroupId') \
    --subnet-id $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') \
    --block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
    --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=node3},{Key=Clusterid,Value=$ocp_clusterid}]" \
  )
fi
if [ ! "$ocp_ec2_node4" ]; then
  export ocp_ec2_node4=$(aws ec2 run-instances \
    --image-id ${ocp_ec2ami[1]} \
    --count 1 \
    --instance-type $ocp_ec2_node_type \
    --key-name $(echo $ocp_keypair | jq -r '.KeyName') \
    --security-group-ids $(echo $ocp_awssg_node | jq -r '.GroupId') \
    --subnet-id $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') \
    --block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
    --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=node4},{Key=Clusterid,Value=$ocp_clusterid}]" \
  )
fi
if [ ! "$ocp_ec2_node5" ]; then
  export ocp_ec2_node5=$(aws ec2 run-instances \
    --image-id ${ocp_ec2ami[1]} \
    --count 1 \
    --instance-type $ocp_ec2_node_type \
    --key-name $(echo $ocp_keypair | jq -r '.KeyName') \
    --security-group-ids $(echo $ocp_awssg_node | jq -r '.GroupId') \
    --subnet-id $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') \
    --block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
    --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=node5},{Key=Clusterid,Value=$ocp_clusterid}]" \
  )
fi
if [ ! "$ocp_ec2_node6" ]; then
  export ocp_ec2_node6=$(aws ec2 run-instances \
    --image-id ${ocp_ec2ami[1]} \
    --count 1 \
    --instance-type $ocp_ec2_node_type \
    --key-name $(echo $ocp_keypair | jq -r '.KeyName') \
    --security-group-ids $(echo $ocp_awssg_node | jq -r '.GroupId') \
    --subnet-id $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') \
    --block-device-mappings "DeviceName=/dev/sda1,Ebs={DeleteOnTermination=False,VolumeSize=100}" \
    --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=node6},{Key=Clusterid,Value=$ocp_clusterid}]" \
  )
fi

export ocp_hostinv="\
{ \"masters\": [ \
  \"$(echo $ocp_ec2_master1 | jq -r '.Instances[].PrivateDnsName')\", \
  \"$(echo $ocp_ec2_master2 | jq -r '.Instances[].PrivateDnsName')\", \
  \"$(echo $ocp_ec2_master3 | jq -r '.Instances[].PrivateDnsName')\" \
], \
\"etcd\": [ \
  \"masters\" \
], \
\"routers\": [ \
  \"$(echo $ocp_ec2_infra1 | jq -r '.Instances[].PrivateDnsName')\", \
  \"$(echo $ocp_ec2_infra2 | jq -r '.Instances[].PrivateDnsName')\", \
  \"$(echo $ocp_ec2_infra3 | jq -r '.Instances[].PrivateDnsName')\" \
], \
\"nodes\": [ \
  \"$(echo $ocp_ec2_node1 | jq -r '.Instances[].PrivateDnsName')\", \
  \"$(echo $ocp_ec2_node2 | jq -r '.Instances[].PrivateDnsName')\", \
  \"$(echo $ocp_ec2_node3 | jq -r '.Instances[].PrivateDnsName')\", \
  \"$(echo $ocp_ec2_node4 | jq -r '.Instances[].PrivateDnsName')\", \
  \"$(echo $ocp_ec2_node5 | jq -r '.Instances[].PrivateDnsName')\", \
  \"$(echo $ocp_ec2_node6 | jq -r '.Instances[].PrivateDnsName')\" \
] \
}"
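All of the removed shell fragments share one pattern: the JSON response from an AWS CLI call is captured in an environment variable, and later calls pull individual fields back out of it with jq. A minimal sketch of that pattern, using illustrative values rather than anything taken from the scripts:

```bash
# Capture the run-instances response, then query it with jq when the instance ID is needed.
export demo_instance=$(aws ec2 run-instances --image-id ami-12345678 --count 1 --instance-type t2.micro)
demo_id=$(echo $demo_instance | jq -r '.Instances[].InstanceId')
echo "launched $demo_id"
```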
@@ -1,10 +0,0 @@
export ocp_clusterid="examplerefarch"
export ocp_region="us-east-1"
export ocp_cidrblock="172.16.0.0/16"
export ocp_cidrsubnets_routing=("172.16.0.0/24" "172.16.1.0/24" "172.16.2.0/24")
export ocp_cidrsubnets=("172.16.16.0/20" "172.16.32.0/20" "172.16.48.0/20")
export ocp_domain="examplerefarch.com"
export ocp_ec2_bastion_type="t2.medium"
export ocp_ec2_master_type="m5.2xlarge"
export ocp_ec2_infra_type="m5.2xlarge"
export ocp_ec2_node_type="m5.2xlarge"
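These exports feed the other (removed) shell fragments through the calling shell's environment; the fragments' file names are not part of this hunk, so the driver sketch below uses placeholder names purely to show the intended ordering:

```bash
# Hypothetical driver -- script names are placeholders; the ordering mirrors the fragments in this commit.
source ./vars.sh       # cluster id, region, CIDR blocks, instance types (this file)
source ./prereqs.sh    # AMI lookup, ssh keys, EC2 key pair, IAM registry user, S3 bucket
source ./network.sh    # VPC, subnets, gateways, security groups, ELBs, Route53 zones
source ./instances.sh  # bastion, masters, infra and application nodes
source ./output.sh     # resource tags plus ssh config and inventory snippets
```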
@@ -1,72 +0,0 @@
if [ ! "$ocp_ec2ami" ]; then
  export ocp_ec2ami=($(aws ec2 describe-images --owners 309956199498 | \
    jq -r '.Images[] | [.Name,.ImageId] | @csv' | \
    sed -e 's/,/ /g' | \
    sed -e 's/"//g' | \
    grep -v Beta | \
    grep RHEL-7 | \
    grep Access2-GP2 | \
    sort | \
    tail -1))
fi

if [ ! -f $HOME/.ssh/${ocp_clusterid} ]; then
  echo 'Enter ssh key password'
  read -r passphrase
  ssh-keygen -P $passphrase -o -t rsa -f ~/.ssh/${ocp_clusterid}
fi
export sshkey=($(cat ~/.ssh/${ocp_clusterid}.pub))

if [ ! "$(env | grep SSH_AGENT_PID)" ] || [ ! "$(ps -ef | grep $SSH_AGENT_PID)" ]; then
  rm -rf $SSH_AUTH_SOCK
  unset SSH_AUTH_SOCK
  pkill ssh-agent
  export sshagent=$(nohup ssh-agent &)
  export sshagent=($(echo $sshagent | awk -F'; ' {'print $1 " " $3'}))
  export ${sshagent[0]}
  export ${sshagent[1]}
  unset sshagent
fi

IFS=$'\n'
if [ ! $(ssh-add -L | grep ${sshkey[1]}) ]; then
  echo ssh-add
  ssh-add ~/.ssh/${ocp_clusterid}
fi
unset IFS

if [ ! "$ocp_keypair" ]; then
  export ocp_keypair=$(aws ec2 import-key-pair \
    --key-name ${ocp_clusterid} \
    --public-key-material file://~/.ssh/$ocp_clusterid.pub \
  )
fi
if [ ! "$ocp_iamuser" ]; then
  export ocp_iamuser=$(aws iam create-user --user-name ${ocp_clusterid}-registry)
  sleep 30
  aws iam attach-user-policy \
    --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess \
    --user-name ${ocp_clusterid}-registry
fi
if [ ! "$ocp_iamuser_accesskey" ]; then
  export ocp_iamuser_accesskey=$(aws iam create-access-key --user-name ${ocp_clusterid}-registry)
fi

if [ ! "$ocp_aws_s3bucket" ]; then
  export ocp_aws_s3bucket=$(aws s3api create-bucket --bucket $(echo ${ocp_clusterid}-registry))
  aws s3api put-bucket-policy \
    --bucket $(echo ${ocp_clusterid}-registry) \
    --policy "\
{ \
  \"Statement\": [ \
    { \
      \"Action\": \"s3:*\", \
      \"Effect\": \"Allow\", \
      \"Principal\": { \
        \"AWS\": \"$(echo $ocp_iamuser | jq -r '.User.Arn')\" \
      }, \
      \"Resource\": \"arn:aws:s3:::$(echo $ocp_aws_s3bucket | jq -r '.Location' | sed -e 's/^\///g')\" \
    } \
  ] \
}"
fi
@@ -1,381 +0,0 @@
if [ ! "$ocp_vpc" ]; then
  export ocp_vpc=$(aws ec2 create-vpc --cidr-block $ocp_cidrblock | jq -r '.')
fi

if [ ! "$ocp_vpcdhcpopts" ]; then
  export ocp_vpcdhcpopts=$(aws ec2 create-dhcp-options \
    --dhcp-configuration " \
      [ \
        { \"Key\" : \"domain-name\", \"Values\" : [ \"ec2.internal\" ] }, \
        { \"Key\" : \"domain-name-servers\", \"Values\" : [ \"AmazonProvidedDNS\" ] }, \
        { \"Key\" : \"ntp-servers\", \"Values\" : [ \
          \"$(dig 0.rhel.pool.ntp.org +short | head -1)\", \
          \"$(dig 1.rhel.pool.ntp.org +short | head -1)\", \
          \"$(dig 2.rhel.pool.ntp.org +short | head -1)\", \
          \"$(dig 3.rhel.pool.ntp.org +short | head -1)\" \
          ] \
        } \
      ]" | \
    jq -r '.')
  aws ec2 modify-vpc-attribute \
    --enable-dns-hostnames \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId')
  aws ec2 modify-vpc-attribute \
    --enable-dns-support \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId')
  aws ec2 associate-dhcp-options \
    --dhcp-options-id $(echo $ocp_vpcdhcpopts | jq -r '.DhcpOptions.DhcpOptionsId') \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId')
fi

if [ ! "$ocp_az" ]; then
  export ocp_az=($(aws ec2 describe-availability-zones \
    --filters "Name=region-name,Values=$ocp_region" | \
    jq -r '.[][].ZoneName' | \
    head -3 | \
    tr '\n' ' ' | \
    sed -e "s/ $//g"))
fi

if [ ! "$ocp_subnet1_routing" ]; then
  export ocp_subnet1_routing=$(aws ec2 create-subnet \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --cidr-block ${ocp_cidrsubnets_routing[0]} \
    --availability-zone ${ocp_az[0]})
fi
if [ ! "$ocp_subnet2_routing" ]; then
  export ocp_subnet2_routing=$(aws ec2 create-subnet \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --cidr-block ${ocp_cidrsubnets_routing[1]} \
    --availability-zone ${ocp_az[1]})
fi
if [ ! "$ocp_subnet3_routing" ]; then
  export ocp_subnet3_routing=$(aws ec2 create-subnet \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --cidr-block ${ocp_cidrsubnets_routing[2]} \
    --availability-zone ${ocp_az[2]})
fi

if [ ! "$ocp_subnet1" ]; then
  export ocp_subnet1=$(aws ec2 create-subnet \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --cidr-block ${ocp_cidrsubnets[0]} \
    --availability-zone ${ocp_az[0]})
fi
if [ ! "$ocp_subnet2" ]; then
  export ocp_subnet2=$(aws ec2 create-subnet \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --cidr-block ${ocp_cidrsubnets[1]} \
    --availability-zone ${ocp_az[1]})
fi
if [ ! "$ocp_subnet3" ]; then
  export ocp_subnet3=$(aws ec2 create-subnet \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --cidr-block ${ocp_cidrsubnets[2]} \
    --availability-zone ${ocp_az[2]})
fi

if [ ! "$ocp_igw" ]; then
  export ocp_igw=$(aws ec2 create-internet-gateway)
  aws ec2 attach-internet-gateway \
    --internet-gateway-id $(echo $ocp_igw | jq -r '.InternetGateway.InternetGatewayId') \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId')
fi

if [ ! "$ocp_eip0" ]; then
  export ocp_eip0=$(aws ec2 allocate-address --domain vpc)
fi
if [ ! "$ocp_eip1" ]; then
  export ocp_eip1=$(aws ec2 allocate-address --domain vpc)
fi
if [ ! "$ocp_eip2" ]; then
  export ocp_eip2=$(aws ec2 allocate-address --domain vpc)
fi
if [ ! "$ocp_eip3" ]; then
  export ocp_eip3=$(aws ec2 allocate-address --domain vpc)
fi

if [ ! "$ocp_natgw1" ]; then
  export ocp_natgw1=$(aws ec2 create-nat-gateway \
    --subnet-id $(echo $ocp_subnet1_routing | jq -r '.Subnet.SubnetId') \
    --allocation-id $(echo $ocp_eip1 | jq -r '.AllocationId') \
  )
fi
if [ ! "$ocp_natgw2" ]; then
  export ocp_natgw2=$(aws ec2 create-nat-gateway \
    --subnet-id $(echo $ocp_subnet2_routing | jq -r '.Subnet.SubnetId') \
    --allocation-id $(echo $ocp_eip2 | jq -r '.AllocationId') \
  )
fi
if [ ! "$ocp_natgw3" ]; then
  export ocp_natgw3=$(aws ec2 create-nat-gateway \
    --subnet-id $(echo $ocp_subnet3_routing | jq -r '.Subnet.SubnetId') \
    --allocation-id $(echo $ocp_eip3 | jq -r '.AllocationId') \
  )
fi

if [ ! "$ocp_routetable0" ]; then
  export ocp_routetable0=$(aws ec2 create-route-table \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId')
  )
  aws ec2 create-route \
    --route-table-id $(echo $ocp_routetable0 | jq -r '.RouteTable.RouteTableId') \
    --destination-cidr-block 0.0.0.0/0 \
    --gateway-id $(echo $ocp_igw | jq -r '.InternetGateway.InternetGatewayId') \
    > /dev/null 2>&1
fi
if [ ! "$ocp_rtba0_subnet1_routing" ]; then
  export ocp_rtba0_subnet1_routing=$(aws ec2 associate-route-table \
    --route-table-id $(echo $ocp_routetable0 | jq -r '.RouteTable.RouteTableId') \
    --subnet-id $(echo $ocp_subnet1_routing | jq -r '.Subnet.SubnetId')
  )
fi
if [ ! "$ocp_rtba0_subnet2_routing" ]; then
  export ocp_rtba0_subnet2_routing=$(aws ec2 associate-route-table \
    --route-table-id $(echo $ocp_routetable0 | jq -r '.RouteTable.RouteTableId') \
    --subnet-id $(echo $ocp_subnet2_routing | jq -r '.Subnet.SubnetId')
  )
fi
if [ ! "$ocp_rtba0_subnet3_routing" ]; then
  export ocp_rtba0_subnet3_routing=$(aws ec2 associate-route-table \
    --route-table-id $(echo $ocp_routetable0 | jq -r '.RouteTable.RouteTableId') \
    --subnet-id $(echo $ocp_subnet3_routing | jq -r '.Subnet.SubnetId')
  )
fi
if [ ! "$ocp_routetable1" ]; then
  export ocp_routetable1=$(aws ec2 create-route-table \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId')
  )
  aws ec2 create-route \
    --route-table-id $(echo $ocp_routetable1 | jq -r '.RouteTable.RouteTableId') \
    --destination-cidr-block 0.0.0.0/0 \
    --nat-gateway-id $(echo $ocp_natgw1 | jq -r '.NatGateway.NatGatewayId') \
    > /dev/null 2>&1
fi
if [ ! "$ocp_rtba1" ]; then
  export ocp_rtba1=$(aws ec2 associate-route-table \
    --route-table-id $(echo $ocp_routetable1 | jq -r '.RouteTable.RouteTableId') \
    --subnet-id $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') \
  )
fi
if [ ! "$ocp_routetable2" ]; then
  export ocp_routetable2=$(aws ec2 create-route-table \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId')
  )
  aws ec2 create-route \
    --route-table-id $(echo $ocp_routetable2 | jq -r '.RouteTable.RouteTableId') \
    --destination-cidr-block 0.0.0.0/0 \
    --nat-gateway-id $(echo $ocp_natgw2 | jq -r '.NatGateway.NatGatewayId') \
    > /dev/null 2>&1
fi
if [ ! "$ocp_rtba2" ]; then
  export ocp_rtba2=$(aws ec2 associate-route-table \
    --route-table-id $(echo $ocp_routetable2 | jq -r '.RouteTable.RouteTableId') \
    --subnet-id $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') \
  )
fi
if [ ! "$ocp_routetable3" ]; then
  export ocp_routetable3=$(aws ec2 create-route-table \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId')
  )
  aws ec2 create-route \
    --route-table-id $(echo $ocp_routetable3 | jq -r '.RouteTable.RouteTableId') \
    --destination-cidr-block 0.0.0.0/0 \
    --nat-gateway-id $(echo $ocp_natgw3 | jq -r '.NatGateway.NatGatewayId') \
    > /dev/null 2>&1
fi
if [ ! "$ocp_rtba3" ]; then
  export ocp_rtba3=$(aws ec2 associate-route-table \
    --route-table-id $(echo $ocp_routetable3 | jq -r '.RouteTable.RouteTableId') \
    --subnet-id $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') \
  )
fi

if [ ! "$ocp_awssg_bastion" ]; then
  export ocp_awssg_bastion=$(aws ec2 create-security-group \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --group-name bastion \
    --description "bastion")
  aws ec2 authorize-security-group-ingress \
    --group-id $(echo $ocp_awssg_bastion | jq -r '.GroupId') \
    --protocol tcp \
    --port 22 \
    --cidr 0.0.0.0/0
fi
if [ ! "$ocp_awssg_master" ]; then
  export ocp_awssg_master=$(aws ec2 create-security-group \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --group-name master \
    --description "master")
  aws ec2 authorize-security-group-ingress \
    --group-id $(echo $ocp_awssg_master | jq -r '.GroupId') \
    --protocol tcp \
    --port 1-65535 \
    --cidr 0.0.0.0/0
fi
if [ ! "$ocp_awssg_infra" ]; then
  export ocp_awssg_infra=$(aws ec2 create-security-group \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --group-name infra \
    --description "infra")
  aws ec2 authorize-security-group-ingress \
    --group-id $(echo $ocp_awssg_infra | jq -r '.GroupId') \
    --protocol tcp \
    --port 1-65535 \
    --cidr 0.0.0.0/0
fi
if [ ! "$ocp_awssg_node" ]; then
  export ocp_awssg_node=$(aws ec2 create-security-group \
    --vpc-id $(echo $ocp_vpc | jq -r '.Vpc.VpcId') \
    --group-name node \
    --description "node")
  aws ec2 authorize-security-group-ingress \
    --group-id $(echo $ocp_awssg_node | jq -r '.GroupId') \
    --protocol tcp \
    --port 1-65535 \
    --cidr 0.0.0.0/0
fi

if [ ! "$ocp_elb_masterext" ]; then
  export ocp_elb_masterext=$(aws elb create-load-balancer \
    --load-balancer-name $ocp_clusterid-master-external \
    --subnets \
      $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') \
      $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') \
      $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') \
    --listener Protocol=TCP,LoadBalancerPort=443,InstanceProtocol=TCP,InstancePort=443 \
    --security-groups $(echo $ocp_awssg_master | jq -r '.GroupId') \
    --scheme internet-facing \
    --tags Key=Clusterid,Value=$ocp_clusterid Key=kubernetes.io/cluster/$ocp_clusterid,Value=$ocp_clusterid)
fi
if [ ! "$ocp_elb_masterint" ]; then
  export ocp_elb_masterint=$(aws elb create-load-balancer \
    --load-balancer-name $ocp_clusterid-master-internal \
    --subnets \
      $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') \
      $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') \
      $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') \
    --listener Protocol=TCP,LoadBalancerPort=443,InstanceProtocol=TCP,InstancePort=443 \
    --security-groups $(echo $ocp_awssg_master | jq -r '.GroupId') \
    --scheme internal \
    --tags Key=Clusterid,Value=$ocp_clusterid Key=kubernetes.io/cluster/$ocp_clusterid,Value=$ocp_clusterid)
fi
if [ ! "$ocp_elb_infraext" ]; then
  export ocp_elb_infraext=$(aws elb create-load-balancer \
    --load-balancer-name $ocp_clusterid-infra-external \
    --subnets \
      $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') \
      $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') \
      $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') \
    --listener Protocol=TCP,LoadBalancerPort=443,InstanceProtocol=TCP,InstancePort=443 \
    --security-groups $(echo $ocp_awssg_infra | jq -r '.GroupId') \
    --scheme internet-facing \
    --tags Key=Clusterid,Value=$ocp_clusterid Key=kubernetes.io/cluster/$ocp_clusterid,Value=$ocp_clusterid)
fi
if [ ! "$ocp_elb_infraint" ]; then
  export ocp_elb_infraint=$(aws elb create-load-balancer \
    --load-balancer-name $ocp_clusterid-infra-internal \
    --subnets \
      $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') \
      $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') \
      $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') \
    --listener Protocol=TCP,LoadBalancerPort=443,InstanceProtocol=TCP,InstancePort=443 \
    --security-groups $(echo $ocp_awssg_infra | jq -r '.GroupId') \
    --scheme internal \
    --tags Key=Clusterid,Value=$ocp_clusterid Key=kubernetes.io/cluster/$ocp_clusterid,Value=$ocp_clusterid)
fi

if [ ! "$ocp_route53_extzone" ]; then
  export ocp_route53_extzone=$(aws route53 create-hosted-zone \
    --caller-reference $(date +%s) \
    --name $ocp_domain \
    --hosted-zone-config "PrivateZone=False")
fi
if [ ! "$ocp_route53_intzone" ]; then
  export ocp_route53_intzone=$(aws route53 create-hosted-zone \
    --caller-reference $(date +%s) \
    --name $ocp_domain \
    --vpc "VPCRegion=$ocp_region,VPCId=$(echo $ocp_vpc | jq -r '.Vpc.VpcId')" \
    --hosted-zone-config "PrivateZone=True")
fi

if [ ! "$aws_route53rrset_masterext" ]; then
  export aws_route53rrset_masterext=$(aws route53 change-resource-record-sets \
    --hosted-zone-id $(echo $ocp_route53_extzone | jq -r '.HostedZone.Id' | sed 's/\/hostedzone\///g') \
    --change-batch "\
{ \
  \"Changes\": [ \
    { \
      \"Action\": \"CREATE\", \
      \"ResourceRecordSet\": { \
        \"Name\": \"api.$ocp_domain\", \
        \"Type\": \"CNAME\", \
        \"TTL\": 300, \
        \"ResourceRecords\": [ \
          { \"Value\": \"$(echo $ocp_elb_masterext | jq -r '.DNSName')\" } \
        ] \
      } \
    } \
  ] \
}")
fi
if [ ! "$aws_route53rrset_masterint" ]; then
  export aws_route53rrset_masterint=$(aws route53 change-resource-record-sets \
    --hosted-zone-id $(echo $ocp_route53_intzone | jq -r '.HostedZone.Id' | sed 's/\/hostedzone\///g') \
    --change-batch "\
{ \
  \"Changes\": [ \
    { \
      \"Action\": \"CREATE\", \
      \"ResourceRecordSet\": { \
        \"Name\": \"api.$ocp_domain\", \
        \"Type\": \"CNAME\", \
        \"TTL\": 300, \
        \"ResourceRecords\": [ \
          { \"Value\": \"$(echo $ocp_elb_masterint | jq -r '.DNSName')\" } \
        ] \
      } \
    } \
  ] \
}")
fi
if [ ! "$aws_route53rrset_infraext" ]; then
  export aws_route53rrset_infraext=$(aws route53 change-resource-record-sets \
    --hosted-zone-id $(echo $ocp_route53_extzone | jq -r '.HostedZone.Id' | sed 's/\/hostedzone\///g') \
    --change-batch "\
{ \
  \"Changes\": [ \
    { \
      \"Action\": \"CREATE\", \
      \"ResourceRecordSet\": { \
        \"Name\": \"*.apps.$ocp_domain\", \
        \"Type\": \"CNAME\", \
        \"TTL\": 300, \
        \"ResourceRecords\": [ \
          { \"Value\": \"$(echo $ocp_elb_infraext | jq -r '.DNSName')\" } \
        ] \
      } \
    } \
  ] \
}")
fi
if [ ! "$aws_route53rrset_infraint" ]; then
  export aws_route53rrset_infraint=$(aws route53 change-resource-record-sets \
    --hosted-zone-id $(echo $ocp_route53_intzone | jq -r '.HostedZone.Id' | sed 's/\/hostedzone\///g') \
    --change-batch "\
{ \
  \"Changes\": [ \
    { \
      \"Action\": \"CREATE\", \
      \"ResourceRecordSet\": { \
        \"Name\": \"*.apps.$ocp_domain\", \
        \"Type\": \"CNAME\", \
        \"TTL\": 300, \
        \"ResourceRecords\": [ \
          { \"Value\": \"$(echo $ocp_elb_infraint | jq -r '.DNSName')\" } \
        ] \
      } \
    } \
  ] \
}")
fi
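The four change-resource-record-sets calls above publish the api.<domain> and *.apps.<domain> CNAMEs that point at the master and infra ELBs. One way to confirm the records landed in the public zone (any equivalent AWS CLI query works; this sketch reuses the script's own variables):

```bash
# List the CNAME records the script just created in the public hosted zone.
aws route53 list-resource-record-sets \
  --hosted-zone-id "$(echo $ocp_route53_extzone | jq -r '.HostedZone.Id' | sed 's/\/hostedzone\///g')" | \
  jq -r '.ResourceRecordSets[] | select(.Type == "CNAME") | "\(.Name) -> \(.ResourceRecords[0].Value)"'
```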
@@ -1,94 +0,0 @@
aws ec2 create-tags --resources $(echo $ocp_vpc | jq -r '.Vpc.VpcId') --tags Key=Name,Value=$ocp_clusterid
aws ec2 create-tags --resources $(echo $ocp_vpcdhcpopts | jq -r '.DhcpOptions.DhcpOptionsId') --tags Key=Name,Value=$ocp_clusterid
aws ec2 create-tags --resources $(echo $ocp_igw | jq -r '.InternetGateway.InternetGatewayId') --tags Key=Name,Value=$ocp_clusterid
aws ec2 create-tags --resources $(echo $ocp_subnet1_routing | jq -r '.Subnet.SubnetId') --tags Key=Name,Value=${ocp_az[0]}_routing
aws ec2 create-tags --resources $(echo $ocp_subnet2_routing | jq -r '.Subnet.SubnetId') --tags Key=Name,Value=${ocp_az[1]}_routing
aws ec2 create-tags --resources $(echo $ocp_subnet3_routing | jq -r '.Subnet.SubnetId') --tags Key=Name,Value=${ocp_az[2]}_routing
aws ec2 create-tags --resources $(echo $ocp_subnet1 | jq -r '.Subnet.SubnetId') --tags Key=Name,Value=${ocp_az[0]}
aws ec2 create-tags --resources $(echo $ocp_subnet2 | jq -r '.Subnet.SubnetId') --tags Key=Name,Value=${ocp_az[1]}
aws ec2 create-tags --resources $(echo $ocp_subnet3 | jq -r '.Subnet.SubnetId') --tags Key=Name,Value=${ocp_az[2]}
aws ec2 create-tags --resources $(echo $ocp_eip0 | jq -r '.AllocationId') --tags Key=Name,Value=bastion
aws ec2 create-tags --resources $(echo $ocp_eip1 | jq -r '.AllocationId') --tags Key=Name,Value=${ocp_az[0]}
aws ec2 create-tags --resources $(echo $ocp_eip2 | jq -r '.AllocationId') --tags Key=Name,Value=${ocp_az[1]}
aws ec2 create-tags --resources $(echo $ocp_eip3 | jq -r '.AllocationId') --tags Key=Name,Value=${ocp_az[2]}
aws ec2 create-tags --resources $(echo $ocp_natgw1 | jq -r '.NatGateway.NatGatewayId') --tags Key=Name,Value=${ocp_az[0]}
aws ec2 create-tags --resources $(echo $ocp_natgw2 | jq -r '.NatGateway.NatGatewayId') --tags Key=Name,Value=${ocp_az[1]}
aws ec2 create-tags --resources $(echo $ocp_natgw3 | jq -r '.NatGateway.NatGatewayId') --tags Key=Name,Value=${ocp_az[2]}
aws ec2 create-tags --resources $(echo $ocp_routetable1 | jq -r '.RouteTable.RouteTableId') --tags Key=Name,Value=${ocp_az[0]}
aws ec2 create-tags --resources $(echo $ocp_routetable2 | jq -r '.RouteTable.RouteTableId') --tags Key=Name,Value=${ocp_az[1]}
aws ec2 create-tags --resources $(echo $ocp_routetable3 | jq -r '.RouteTable.RouteTableId') --tags Key=Name,Value=${ocp_az[2]}
aws ec2 create-tags --resources $(echo $ocp_awssg_bastion | jq -r '.GroupId') --tags Key=Name,Value=bastion
aws ec2 create-tags --resources $(echo $ocp_awssg_bastion | jq -r '.GroupId') --tags Key=clusterid,Value=${ocp_clusterid}
aws ec2 create-tags --resources $(echo $ocp_awssg_master | jq -r '.GroupId') --tags Key=Name,Value=Master
aws ec2 create-tags --resources $(echo $ocp_awssg_master | jq -r '.GroupId') --tags Key=clusterid,Value=${ocp_clusterid}
aws ec2 create-tags --resources $(echo $ocp_awssg_infra | jq -r '.GroupId') --tags Key=Name,Value=Infra
aws ec2 create-tags --resources $(echo $ocp_awssg_infra | jq -r '.GroupId') --tags Key=clusterid,Value=${ocp_clusterid}
aws ec2 create-tags --resources $(echo $ocp_awssg_node | jq -r '.GroupId') --tags Key=Name,Value=Node
aws ec2 create-tags --resources $(echo $ocp_awssg_node | jq -r '.GroupId') --tags Key=clusterid,Value=${ocp_clusterid}

if [ "$ocp_route53_extzone" ]; then
  echo "Domain $ocp_domain will need delegation set to the following nameservers"
  echo $ocp_route53_extzone | jq -r '.DelegationSet.NameServers[]'
fi

echo
echo

echo "Add the following to ~/.ssh/config
Host bastion
  HostName $(echo $ocp_eip0 | jq -r '.PublicIp')
  User ec2-user
  CheckHostIP no
  ForwardAgent yes
  ProxyCommand none
  StrictHostKeyChecking no
  IdentityFile ~/.ssh/${ocp_clusterid}

Host *.compute-1.amazonaws.com
  user ec2-user
  StrictHostKeyChecking no
  CheckHostIP no
  IdentityFile ~/.ssh/${ocp_clusterid}

Host *.ec2.internal
  ProxyCommand ssh ec2-user@bastion -W %h:%p
  user ec2-user
  StrictHostKeyChecking no
  CheckHostIP no
  IdentityFile ~/.ssh/${ocp_clusterid}"

echo
echo

echo "Add the following to openshift-ansible installer inventory"
echo "openshift_hosted_registry_storage_s3_accesskey=$(echo $ocp_iamuser_accesskey | jq -r '.AccessKey.AccessKeyId')"
echo "openshift_hosted_registry_storage_s3_secretkey=$(echo $ocp_iamuser_accesskey | jq -r '.AccessKey.SecretAccessKey')"

cat >> ~/.ssh/${ocp_clusterid}-s3user_access_key << EOF
<!-- BEGIN ANSIBLE MANAGED BLOCK -->
openshift_hosted_registry_storage_s3_accesskey=$(echo $ocp_iamuser_accesskey | jq -r '.AccessKey.AccessKeyId')
openshift_hosted_registry_storage_s3_secretkey=$(echo $ocp_iamuser_accesskey | jq -r '.AccessKey.SecretAccessKey')
<!-- END ANSIBLE MANAGED BLOCK -->
EOF
echo "IAM s3 user access key stored in ~/.ssh/${ocp_clusterid}-s3user_access_key"
echo "Add the following to openshift-ansible installer inventory"
cat ~/.ssh/${ocp_clusterid}-s3user_access_key | grep -v '<!--'

echo
echo

echo "Add the following to openshift-ansible installer inventory
[masters]
$(echo $ocp_hostinv | jq -r '.masters[]')

[etcd]
$(echo $ocp_hostinv | jq -r '.etcd[]')

[routers]
$(echo $ocp_hostinv | jq -r '.routers[]')

[nodes]
$(echo $ocp_hostinv | jq -r '.nodes[]')

[nodes:children]
masters"
reference-architecture/3.9/playbooks/deploy_aws.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
---
- hosts: localhost
  become: False
  vars_files:
  - vars/main.yaml
  tasks:
  - import_tasks: roles/aws/tasks/getcreds.yaml
  - import_tasks: roles/aws/tasks/getazs.yaml
  - import_tasks: roles/aws/tasks/getec2ami.yaml
  - import_tasks: roles/aws/tasks/sshkeys.yaml
  - import_tasks: roles/aws/tasks/ec2keypair.yaml
  - import_tasks: roles/aws/tasks/iam.yaml
  - import_tasks: roles/aws/tasks/s3.yaml
  - import_tasks: roles/aws/tasks/s3policy.yaml
  - import_tasks: roles/aws/tasks/vpcdhcpopts.yaml
  - import_tasks: roles/aws/tasks/vpc.yaml
  - import_tasks: roles/aws/tasks/route53.yaml
  - import_tasks: roles/aws/tasks/igw.yaml
  - import_tasks: roles/aws/tasks/subnet.yaml
  - import_tasks: roles/aws/tasks/natgw.yaml
  - import_tasks: roles/aws/tasks/routetable.yaml
  - import_tasks: roles/aws/tasks/routetablerule.yaml
  - import_tasks: roles/aws/tasks/securitygroup.yaml
  - import_tasks: roles/aws/tasks/securitygrouprule.yaml
  - import_tasks: roles/aws/tasks/elb.yaml
  - import_tasks: roles/aws/tasks/ec2.yaml
  - import_tasks: roles/aws/tasks/eip.yaml
  - import_tasks: roles/aws/tasks/ec2elb.yaml
  - import_tasks: roles/aws/tasks/route53record.yaml
  - import_tasks: roles/aws/tasks/configfiles.yaml
  - import_tasks: roles/aws/tasks/configfilesdata.yaml
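The task files imported above are written to both create and tear down resources: nearly every task branches on whether the variable state contains 'absent'. How state is meant to be supplied is not shown in this commit, so the teardown flag below is an assumption:

```bash
# Provision the environment (settings come from vars/main.yaml).
ansible-playbook reference-architecture/3.9/playbooks/deploy_aws.yaml

# Tear it down again -- assumed to be driven by an extra variable, based on the
# "(state is undefined or 'absent' not in state)" checks used throughout the tasks.
ansible-playbook reference-architecture/3.9/playbooks/deploy_aws.yaml -e state=absent
```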
reference-architecture/3.9/playbooks/deploy_aws_cns.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
---
- hosts: localhost
  become: False
  vars_files:
  - vars/main.yaml
  tasks:
  - import_tasks: roles/aws/tasks/getcreds.yaml
  - import_tasks: roles/aws/tasks/getazs.yaml
  - import_tasks: roles/aws/tasks/getec2ami.yaml
  - import_tasks: roles/aws/tasks/vpcdhcpopts.yaml
  - import_tasks: roles/aws/tasks/vpc.yaml
  - import_tasks: roles/aws/tasks/subnet.yaml
  - import_tasks: roles/aws/tasks/securitygroup_cns.yaml
  - import_tasks: roles/aws/tasks/securitygrouprule_cns.yaml
  - import_tasks: roles/aws/tasks/ec2_cns.yaml
  - import_tasks: roles/aws/tasks/configfiles_cns.yaml
  - import_tasks: roles/aws/tasks/configfilesdata_cns.yaml
@@ -0,0 +1,40 @@
#!/usr/bin/env python

from ansible import errors
import boto3


try:
    # ansible-2.0
    from ansible.plugins.lookup import LookupBase
except ImportError:
    # ansible-1.9.x
    class LookupBase(object):
        def __init__(self, basedir=None, runner=None, **kwargs):
            self.runner = runner
            self.basedir = self.runner.basedir

        def get_basedir(self, variables):
            return self.basedir


class LookupModule(LookupBase):
    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def run(self, args, inject=None, **kwargs):
        try:
            for a in list(args):
                if 'aws_region' in a:
                    aws_region = a['aws_region']
        except Exception as e:
            raise errors.AnsibleError("%s" % (e))

        try:
            zones = []
            response = boto3.client('ec2', aws_region).describe_availability_zones()
            for k in response['AvailabilityZones']:
                zones.append(k['ZoneName'])
            return(zones)
        except Exception as e:
            raise errors.AnsibleError("%s" % (e))
@@ -0,0 +1,32 @@
#!/usr/bin/env python

from ansible import errors
import boto3


try:
    # ansible-2.0
    from ansible.plugins.lookup import LookupBase
except ImportError:
    # ansible-1.9.x
    class LookupBase(object):
        def __init__(self, basedir=None, runner=None, **kwargs):
            self.runner = runner
            self.basedir = self.runner.basedir

        def get_basedir(self, variables):
            return self.basedir


class LookupModule(LookupBase):
    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def run(self, args, inject=None, **kwargs):
        try:
            # A caller identity ARN looks like arn:aws:sts::123456789012:...,
            # so the fifth colon-separated field is the AWS account ID.
            return(boto3.client('sts').
                   get_caller_identity()['Arn'].
                   split(':')[4])

        except Exception as e:
            raise errors.AnsibleError("%s" % (e))
@@ -0,0 +1,37 @@
#!/usr/bin/env python

from ansible import errors
import boto3


try:
    # ansible-2.0
    from ansible.plugins.lookup import LookupBase
except ImportError:
    # ansible-1.9.x
    class LookupBase(object):
        def __init__(self, basedir=None, runner=None, **kwargs):
            self.runner = runner
            self.basedir = self.runner.basedir

        def get_basedir(self, variables):
            return self.basedir


class LookupModule(LookupBase):
    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def run(self, args, inject=None, **kwargs):
        try:
            for a in list(args):
                if 'id' in a[0]:
                    id = str(a[0]['id'])
        except Exception as e:
            raise errors.AnsibleError("%s" % (e))

        try:
            client = boto3.client('route53')
            return(client.get_hosted_zone(Id=id)['DelegationSet']['NameServers'])
        except Exception as e:
            raise errors.AnsibleError("%s" % (e))
@@ -0,0 +1,21 @@
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} config files"
  file:
    path: "{{ item }}"
    state: "{{ 'touch' if (state is undefined or 'absent' not in state) else 'absent' }}"
  register: touchfiles
  with_items:
  - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-domaindelegation"
  - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-cpk"
  - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-s3"
  - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-urls"
  - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hosts"
  - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}"
  changed_when: "\
    touchfiles.diff is defined \
    and \
    ( \
      ( touchfiles.diff.before.state == 'absent' and touchfiles.diff.after.state == 'touch' ) \
      or \
      ( touchfiles.diff.before.state == 'file' and touchfiles.diff.after.state == 'absent' ) \
    )"
@@ -0,0 +1,17 @@
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} config files (CNS)"
  file:
    path: "{{ item }}"
    state: "{{ 'touch' if (state is undefined or 'absent' not in state) else 'absent' }}"
  register: touchfiles
  with_items:
  - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hostscns"
  - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hostsgfs"
  changed_when: "\
    touchfiles.diff is defined \
    and \
    ( \
      ( touchfiles.diff.before.state == 'absent' and touchfiles.diff.after.state == 'touch' ) \
      or \
      ( touchfiles.diff.before.state == 'file' and touchfiles.diff.after.state == 'absent' ) \
    )"
@@ -0,0 +1,73 @@
---
- name: Gather Route53 zone facts
  route53_facts:
    dns_name: "{{ clusterid }}.{{ dns_domain }}"
    query: hosted_zone
  register: r53_zones

- name: Filter Route53 zones
  set_fact:
    r53_zones: ['{% for zone in r53_zones.HostedZones -%}
      {%- if zone.Config.PrivateZone == false -%}
        {%- if clusterid ~ "." ~ dns_domain ~ "." in zone.Name -%}
          { "Id": "{{ zone.Id | safe }}", "Name": "{{ zone.Name | safe }}" }
        {%- endif -%}
      {%- endif -%}
      {% endfor -%}']
  register: r53_zones
  when: "r53_zones.HostedZones | length > 0"

- name: Fetch NS records from the Route53 public zone
  set_fact:
    ns: "{{ lookup('route53_namservers', args).split(',') }}"
  vars:
  - args: ['{ "id": "{{ r53_zones[0].Id }}" } ']
  when: "r53_zones | length == 1"

- debug:
    msg:
    - 'Error in env; AWS Route53 public zone for {{ clusterid }} is not available. Please rerun this play!'
  when: "( vpc_subnet_azs | length | int ) < 3"
  failed_when: "r53_zones.HostedZones | length != 1"

- name: "Land file content (clusterid domain delegation)"
  blockinfile:
    path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-domaindelegation"
    marker: "#<!-- {mark} OUTPUT -->"
    content: |
      {{ lookup('template', ( playbook_dir + '/roles/aws/templates/outputdomaindeleg.j2') ) }}

- name: "Land openshift-ansible installer cloudprovider_kind and credentials"
  blockinfile:
    path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-cpk"
    marker: "#<!-- {mark} OUTPUT -->"
    content: |
      {{ lookup('template', ( playbook_dir + '/roles/aws/templates/outputcpk.j2') ) }}

- name: "Land openshift-ansible installer registry storage kind and credentials"
  blockinfile:
    path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-s3"
    marker: "#<!-- {mark} OUTPUT -->"
    content: |
      {{ lookup('template', ( playbook_dir + '/roles/aws/templates/outputs3.j2') ) }}

- name: "Land openshift-ansible installer urls"
  blockinfile:
    path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-urls"
    marker: "#<!-- {mark} OUTPUT -->"
    content: |
      {{ lookup('template', ( playbook_dir + '/roles/aws/templates/outputurls.j2') ) }}

- name: "Land openshift-ansible installer host inventory"
  template:
    dest: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hosts"
    trim_blocks: no
    src: "{{ playbook_dir }}/roles/aws/templates/outputhosts.j2"

- name: "Land SSH config"
  blockinfile:
    path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}"
    create: yes
    marker: "#<!-- {mark} OUTPUT -->"
    content: |
      {{ lookup('template', ( playbook_dir + '/roles/aws/templates/ssh_config.j2') ) }}
@@ -0,0 +1,16 @@
---
- name: "Land openshift-ansible installer host inventory (CNS nodes)"
  blockinfile:
    path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hostscns"
    create: yes
    marker: "#<!-- {mark} ANSIBLE MANAGED BLOCK -->"
    content: |
      {{ lookup('template', ( playbook_dir + '/roles/aws/templates/outputhostscns.j2') ) }}

- name: "Land openshift-ansible installer host inventory (GFS)"
  blockinfile:
    path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hostsgfs"
    create: yes
    marker: "#<!-- {mark} ANSIBLE MANAGED BLOCK -->"
    content: |
      {{ lookup('template', ( playbook_dir + '/roles/aws/templates/outputhostsgfs.j2') ) }}
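The configfiles/configfilesdata tasks leave per-cluster snippets under ~/.ssh: domain delegation, cloud-provider credentials, registry S3 credentials, console URLs, host inventories, and an ssh_config fragment. The diff does not show how they are meant to be consumed, so the following is only an assumed workflow:

```bash
# Assumed post-run usage -- the file names come from the tasks above, the workflow itself is a guess.
clusterid=refarch; dns_domain=example.com
# Reach the instances through the bastion using the generated ssh_config fragment.
ssh -F ~/.ssh/config-${clusterid}.${dns_domain} bastion
# Review the generated snippets before merging them into the openshift-ansible inventory.
cat ~/.ssh/config-${clusterid}.${dns_domain}-{cpk,s3,urls,hosts}
```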
reference-architecture/3.9/playbooks/roles/aws/tasks/ec2.yaml (new file, 342 lines)
@@ -0,0 +1,342 @@
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} EC2 instance ( Bastion )"
  ec2:
    assign_public_ip: yes
    count_tag:
      Name: "bastion"
    exact_count: "{{ 1 if (state is undefined or 'absent' not in state) else 0 }}"
    group: [
      "bastion"
    ]
    instance_type: "{{ ec2_type_bastion }}"
    image: "{{ ec2ami }}"
    instance_tags:
      Name: "bastion"
      clusterid: "{{ clusterid }}"
      ami: "{{ ec2ami }}"
      "kubernetes.io/cluster/{{ clusterid }}": "{{ clusterid }}"
    key_name: "{{ clusterid }}.{{ dns_domain }}"
    monitoring: no
    region: "{{ aws_region }}"
    termination_protection: no
    user_data: "{{ lookup('template', ( playbook_dir + '/roles/aws/templates/ec2_userdata.sh.j2') ) }}"
    volumes:
    - device_name: /dev/sda1
      volume_type: gp2
      volume_size: 25
      delete_on_termination: true
    vpc_subnet_id: "{{ item.subnet }}"
    wait: yes
  with_items:
  - name: "bastion"
    subnet: "{{ subnet_public.results.0.subnet.id if (state is undefined or 'absent' not in state) else '' }}"
    type: "{{ ec2_type_bastion }}"
  retries: 3
  delay: 3
  register: ec2bastion

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].id }}"
  - tagss: "clusterid={{ clusterid }}, ami={{ ec2ami }}"
  with_items: "{{ ec2bastion.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/sda1'].volume_id }}"
  - tagss: "Name={{ item.tagged_instances[0].id }}-rootvol, clusterid={{ clusterid }}, "
  with_items: "{{ ec2bastion.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} EC2 instances ( Master )"
  ec2:
    assign_public_ip: no
    count_tag:
      Name: "{{ item.name }}"
    exact_count: "{{ 1 if (state is undefined or 'absent' not in state) else 0 }}"
    group: [
      "master",
      "node"
    ]
    instance_type: "{{ item.type }}"
    image: "{{ ec2ami }}"
    instance_tags:
      Name: "{{ item.name }}"
    key_name: "{{ clusterid }}.{{ dns_domain }}"
    monitoring: no
    region: "{{ aws_region }}"
    termination_protection: no
    user_data: "{{ lookup('template', ( playbook_dir + '/roles/aws/templates/ec2_userdata.sh.j2') ) }}"
    volumes:
    - device_name: /dev/sda1
      volume_type: gp2
      volume_size: 100
    - device_name: /dev/xvdb
      volume_type: gp2
      volume_size: 100
    - device_name: /dev/xvdc
      volume_type: gp2
      volume_size: 100
    - device_name: /dev/xvdd
      volume_type: gp2
      volume_size: 100
      delete_on_termination: true
    vpc_subnet_id: "{{ item.subnet }}"
    wait: yes
  with_items: "\
    {%- set i = 1 -%}
    {%- set j = 0 -%}
    {%- for k in range(0, ( ec2_count_master if ec2_count_master is defined else 3 ) ) -%}
    {
      'name': 'master{{ i }}',
      'subnet': '{{ subnet_private.results[j].subnet.id if subnet_private is defined else '' }}',
      'type': '{{ ec2_type_master }}'
    },
    {%- if i <= ( ec2_count_master if ec2_count_master is defined else 3 ) -%}
    {%- set i = i + 1 -%}
    {%- endif -%}
    {%- if subnet_private is defined -%}
    {%- if j < subnet_private.results | length - 1 -%}
    {%- set j = j + 1 -%}
    {%- else -%}
    {%- set j = 0 -%}
    {%- endif -%}
    {%- endif -%}
    {%- endfor -%}
    "
  retries: 3
  delay: 3
  register: ec2master

- name: "Pause for 30 to allow for AWS to {{ 'instantiate' if (state is undefined or 'absent' not in state) else 'terminate' }}"
  pause:
    seconds: 30
  when: ec2master is changed

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].id }}"
  - tagss: "clusterid={{ clusterid }}, ami={{ ec2ami }}, kubernetes.io/cluster/{{ clusterid }}={{ clusterid }}, host-type=master, sub-host-type=default"
  with_items: "{{ ec2master.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/sda1'].volume_id }}"
  - tagss: "Name={{ item.tagged_instances[0].id }}-rootvol, clusterid={{ clusterid }}, "
  with_items: "{{ ec2master.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/xvdb'].volume_id }}"
  - tagss: "Name={{ item.tagged_instances[0].id }}-xvdb, clusterid={{ clusterid }}, "
  with_items: "{{ ec2master.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/xvdc'].volume_id }}"
  - tagss: "Name={{ item.tagged_instances[0].id }}-xvdc, clusterid={{ clusterid }}, "
  with_items: "{{ ec2master.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/xvdd'].volume_id }}"
  - tagss: "Name={{ item.tagged_instances[0].id }}-xvdd, clusterid={{ clusterid }}, "
  with_items: "{{ ec2master.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} EC2 instances ( Infrastructure )"
  ec2:
    assign_public_ip: no
    count_tag:
      Name: "{{ item.name }}"
    exact_count: "{{ 1 if (state is undefined or 'absent' not in state) else 0 }}"
    group: [
      "infra",
      "node"
    ]
    instance_type: "{{ item.type }}"
    image: "{{ ec2ami }}"
    instance_tags:
      Name: "{{ item.name }}"
    key_name: "{{ clusterid }}.{{ dns_domain }}"
    monitoring: no
    region: "{{ aws_region }}"
    termination_protection: no
    user_data: "{{ lookup('template', ( playbook_dir + '/roles/aws/templates/ec2_userdata.sh.j2') ) }}"
    volumes:
    - device_name: /dev/sda1
      volume_type: gp2
      volume_size: 100
    - device_name: /dev/xvdb
      volume_type: gp2
      volume_size: 100
    - device_name: /dev/xvdc
      volume_type: gp2
      volume_size: 100
    vpc_subnet_id: "{{ item.subnet }}"
    wait: yes
  with_items: "\
    {%- set i = 1 -%}
    {%- set j = 0 -%}
    {%- for k in range(0, ( ec2_count_infra if ec2_count_infra is defined else 3 ) ) -%}
    {
      'name': 'infra{{ i }}',
      'subnet': '{{ subnet_private.results[j].subnet.id if (state is undefined or 'absent' not in state) else '' }}',
      'type': '{{ ec2_type_infra }}'
    },
    {%- if i <= ( ec2_count_infra if ec2_count_infra is defined else 3 ) -%}
    {%- set i = i + 1 -%}
    {%- endif -%}
    {%- if subnet_private is defined -%}
    {%- if j < subnet_private.results | length - 1 -%}
    {%- set j = j + 1 -%}
    {%- else -%}
    {%- set j = 0 -%}
    {%- endif -%}
    {%- endif -%}
    {%- endfor -%}
    "
  retries: 3
  delay: 3
  register: ec2infra

- name: "Pause for 30 to allow for AWS to {{ 'instantiate' if (state is undefined or 'absent' not in state) else 'terminate' }}"
  pause:
    seconds: 30
  when: ec2infra is changed

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].id }}"
  - tagss: "clusterid={{ clusterid }}, ami={{ ec2ami }}, kubernetes.io/cluster/{{ clusterid }}={{ clusterid }}, host-type=node, sub-host-type=infra"
  with_items: "{{ ec2infra.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/sda1'].volume_id }}"
  - tagss: "Name={{ item.tagged_instances[0].id }}-rootvol, clusterid={{ clusterid }}, "
  with_items: "{{ ec2infra.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/xvdb'].volume_id }}"
  - tagss: "Name={{ item.tagged_instances[0].id }}-xvdb, clusterid={{ clusterid }}, "
  with_items: "{{ ec2infra.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- include_tasks: tag.yaml
  vars:
  - resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/xvdc'].volume_id }}"
  - tagss: "Name={{ item.tagged_instances[0].id }}-xvdc, clusterid={{ clusterid }}, "
  with_items: "{{ ec2infra.results }}"
  when:
  - ( state is undefined ) or ( 'absent' not in state )

- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} EC2 instances ( App )"
  ec2:
    assign_public_ip: no
    count_tag:
      Name: "{{ item.name }}"
    exact_count: "{{ 1 if (state is undefined or 'absent' not in state) else 0 }}"
    group: [
      "node"
    ]
    instance_type: "{{ item.type }}"
    image: "{{ ec2ami }}"
    instance_tags:
      Name: "{{ item.name }}"
    key_name: "{{ clusterid }}.{{ dns_domain }}"
    monitoring: no
    region: "{{ aws_region }}"
    termination_protection: no
    user_data: "{{ lookup('template', ( playbook_dir + '/roles/aws/templates/ec2_userdata.sh.j2') ) }}"
    volumes:
    - device_name: /dev/sda1
      volume_type: gp2
      volume_size: 100
    - device_name: /dev/xvdb
      volume_type: gp2
      volume_size: 100
    - device_name: /dev/xvdc
      volume_type: gp2
      volume_size: 100
    vpc_subnet_id: "{{ item.subnet }}"
    wait: yes
  with_items: "\
    {%- set i = 1 -%}
    {%- set j = 0 -%}
    {%- for k in range(0, ( ec2_count_node if ec2_count_node is defined else 3 ) ) -%}
    {
      'name': 'node{{ i }}',
      'subnet': '{{ subnet_private.results[j].subnet.id if subnet_private is defined else '' }}',
      'type': '{{ ec2_type_node }}'
    },
    {%- if i <= ( ec2_count_node if ec2_count_node is defined else 3 ) -%}
    {%- set i = i + 1 -%}
    {%- endif -%}
    {%- if subnet_private is defined -%}
    {%- if j < subnet_private.results | length - 1 -%}
    {%- set j = j + 1 -%}
    {%- else -%}
    {%- set j = 0 -%}
    {%- endif -%}
    {%- endif -%}
    {%- endfor -%}
    "
  retries: 3
  delay: 3
  register: ec2node

- name: "Pause for 30 to allow for AWS to {{ 'instantiate' if (state is undefined or 'absent' not in state) else 'terminate' }}"
|
||||
pause:
|
||||
seconds: 30
|
||||
when: ec2node is changed
|
||||
|
||||
- include_tasks: tag.yaml
|
||||
vars:
|
||||
- resource: "{{ item.tagged_instances[0].id }}"
|
||||
- tagss: "clusterid={{ clusterid }}, ami={{ ec2ami }}, kubernetes.io/cluster/{{ clusterid }}={{ clusterid }}, host-type=node, sub-host-type=compute"
|
||||
with_items: "{{ ec2node.results }}"
|
||||
when:
|
||||
- ( state is undefined ) or ( 'absent' not in state )
|
||||
|
||||
- include_tasks: tag.yaml
|
||||
vars:
|
||||
- resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/sda1'].volume_id }}"
|
||||
- tagss: "Name={{ item.tagged_instances[0].id }}-rootvol, clusterid={{ clusterid }}, "
|
||||
with_items: "{{ ec2node.results }}"
|
||||
when:
|
||||
- ( state is undefined ) or ( 'absent' not in state )
|
||||
|
||||
- include_tasks: tag.yaml
|
||||
vars:
|
||||
- resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/xvdb'].volume_id }}"
|
||||
- tagss: "Name={{ item.tagged_instances[0].id }}-xvdb, clusterid={{ clusterid }}, "
|
||||
with_items: "{{ ec2node.results }}"
|
||||
when:
|
||||
- ( state is undefined ) or ( 'absent' not in state )
|
||||
|
||||
- include_tasks: tag.yaml
|
||||
vars:
|
||||
- resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/xvdc'].volume_id }}"
|
||||
- tagss: "Name={{ item.tagged_instances[0].id }}-xvdc, clusterid={{ clusterid }}, "
|
||||
with_items: "{{ ec2node.results }}"
|
||||
when:
|
||||
- ( state is undefined ) or ( 'absent' not in state )
|
||||
@@ -0,0 +1,85 @@
|
||||
---
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} EC2 instances ( CNS )"
|
||||
ec2:
|
||||
assign_public_ip: no
|
||||
count_tag:
|
||||
Name: "{{ item.name }}"
|
||||
exact_count: "{{ 1 if (state is undefined or 'absent' not in state) else 0 }}"
|
||||
group: [
|
||||
"cns",
|
||||
"node"
|
||||
]
|
||||
instance_type: "{{ item.type }}"
|
||||
image: "{{ ec2ami }}"
|
||||
instance_tags:
|
||||
Name: "{{ item.name }}"
|
||||
key_name: "{{ clusterid }}.{{ dns_domain }}"
|
||||
monitoring: no
|
||||
region: "{{ aws_region }}"
|
||||
termination_protection: no
|
||||
user_data: "{{ lookup('template', ( playbook_dir + '/roles/aws/templates/ec2_userdata.sh.j2') ) }}"
|
||||
volumes:
|
||||
- device_name: /dev/sda1
|
||||
volume_type: gp2
|
||||
volume_size: 100
|
||||
- device_name: /dev/xvdb
|
||||
volume_type: gp2
|
||||
volume_size: 100
|
||||
- device_name: /dev/xvdc
|
||||
volume_type: gp2
|
||||
volume_size: 100
|
||||
- device_name: /dev/xvdd
|
||||
volume_type: gp2
|
||||
volume_size: 100
|
||||
delete_on_termination: true
|
||||
vpc_subnet_id: "{{ item.subnet }}"
|
||||
wait: yes
|
||||
with_items: "\
|
||||
{%- set i = 1 -%}
|
||||
{%- set j = 0 -%}
|
||||
{%- for k in range(0, ( ec2_count_cns if ec2_count_cns is defined else 3 ) ) -%}
|
||||
{%- set id = subnet_private.results[j].subnet.id if subnet_private is defined else '' -%}
|
||||
{
|
||||
'name': 'cns{{ i }}',
|
||||
'subnet': '{{ id }}',
|
||||
'type': '{{ ec2_type_cns }}'
|
||||
},
|
||||
{%- if i <= ( ec2_count_cns if ec2_count_cns is defined else 3 ) -%}
|
||||
{%- set i = i + 1 -%}
|
||||
{%- endif -%}
|
||||
{%- if subnet_private is defined -%}
|
||||
{%- if j < subnet_private.results | length - 1 -%}
|
||||
{%- set j = j + 1 -%}
|
||||
{%- else -%}
|
||||
{%- set j = 0 -%}
|
||||
{%- endif -%}
|
||||
{%- endif -%}
|
||||
{%- endfor -%}
|
||||
"
|
||||
retries: 3
|
||||
delay: 3
|
||||
register: ec2cns
|
||||
|
||||
- include_tasks: tag.yaml
|
||||
vars:
|
||||
- resource: "{{ item.tagged_instances[0].id }}"
|
||||
- tagss: "clusterid={{ clusterid }}, ami={{ ec2ami }}, kubernetes.io/cluster/{{ clusterid }}={{ clusterid }}, host-type=node, sub-host-type=cns"
|
||||
with_items: "{{ ec2cns.results }}"
|
||||
when:
|
||||
- ( state is undefined ) or ( 'absent' not in state )
|
||||
|
||||
- include_tasks: tag.yaml
|
||||
vars:
|
||||
- resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/sda1'].volume_id }}"
|
||||
- tagss: "Name={{ item.tagged_instances[0].id }}-rootvol, clusterid={{ clusterid }}, "
|
||||
with_items: "{{ ec2cns.results }}"
|
||||
when:
|
||||
- ( state is undefined ) or ( 'absent' not in state )
|
||||
|
||||
- include_tasks: tag.yaml
|
||||
vars:
|
||||
- resource: "{{ item.tagged_instances[0].block_device_mapping['/dev/xvdb'].volume_id }}"
|
||||
- tagss: "Name={{ item.tagged_instances[0].id }}-xvdb, clusterid={{ clusterid }}, "
|
||||
with_items: "{{ ec2cns.results }}"
|
||||
when:
|
||||
- ( state is undefined ) or ( 'absent' not in state )
|
||||
@@ -0,0 +1,33 @@
|
||||
---
|
||||
- name: Register EC2 instances to internet-facing master ELB
|
||||
ec2_elb:
|
||||
instance_id: "{{ item.tagged_instances[0].id }}"
|
||||
ec2_elbs: "{{ elbextmaster.results[0].elb.name }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: present
|
||||
with_items:
|
||||
- "{{ ec2master.results }}"
|
||||
register: test
|
||||
failed_when: "'InvalidInstanceID.NotFound' in test"
|
||||
|
||||
- name: Register EC2 instances to internal master ELB
|
||||
ec2_elb:
|
||||
instance_id: "{{ item.tagged_instances[0].id }}"
|
||||
ec2_elbs: "{{ elbintmaster.results[0].elb.name }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: present
|
||||
with_items:
|
||||
- "{{ ec2master.results }}"
|
||||
register: test
|
||||
failed_when: "'InvalidInstanceID.NotFound' in test"
|
||||
|
||||
- name: Register EC2 instances to internet-facing infra ELB
|
||||
ec2_elb:
|
||||
instance_id: "{{ item.tagged_instances[0].id }}"
|
||||
ec2_elbs: "{{ elbextinfra.results[0].elb.name }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: present
|
||||
with_items:
|
||||
- "{{ ec2infra.results }}"
|
||||
register: test
|
||||
failed_when: "'InvalidInstanceID.NotFound' in test"
|
||||
@@ -0,0 +1,7 @@
|
||||
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} EC2 KeyPair"
  ec2_key:
    key_material: "{{ lookup('file', '~/.ssh/' + clusterid + '.' + dns_domain + '.pub') | expanduser if (state is undefined or 'absent' not in state) else '' }}"
    name: "{{ clusterid }}.{{ dns_domain }}"
    region: "{{ aws_region }}"
    state: "{{ state | default('present') }}"
@@ -0,0 +1,11 @@
|
||||
---
- name: Create EIP instance for EC2 / Bastion
  ec2_eip:
    device_id: "{{ ec2bastion.results[0].tagged_instances[0].id }}"
    in_vpc: yes
    region: "{{ aws_region }}"
    state: present
  retries: 3
  register: eipbastion
  when:
    - ( state is undefined ) or ( 'absent' not in state )
108
reference-architecture/3.9/playbooks/roles/aws/tasks/elb.yaml
Normal file
@@ -0,0 +1,108 @@
|
||||
---
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} ELB ( master external )"
|
||||
ec2_elb_lb:
|
||||
cross_az_load_balancing: "yes"
|
||||
health_check:
|
||||
ping_protocol: https
|
||||
ping_port: 443
|
||||
ping_path: "/api"
|
||||
response_timeout: 2
|
||||
interval: 5
|
||||
unhealthy_threshold: 2
|
||||
healthy_threshold: 3
|
||||
listeners:
|
||||
- protocol: tcp
|
||||
load_balancer_port: 443
|
||||
instance_protocol: tcp
|
||||
instance_port: 443
|
||||
name: "{{ clusterid }}-master-external"
|
||||
region: "{{ aws_region }}"
|
||||
scheme: internet-facing
|
||||
security_group_names:
|
||||
- "master"
|
||||
state: "{{ state | default('present') }}"
|
||||
subnets: "{{ item.subnets }}"
|
||||
tags: "{{ {'kubernetes.io/cluster/' ~ clusterid: clusterid, 'clusterid': clusterid} }}"
|
||||
wait: yes
|
||||
retries: 3
|
||||
delay: 3
|
||||
register: elbextmaster
|
||||
with_items:
|
||||
- subnets: "{{ [
|
||||
subnet_public.results.0.subnet.id,
|
||||
subnet_public.results.1.subnet.id,
|
||||
subnet_public.results.2.subnet.id
|
||||
] if (state is undefined or 'absent' not in state) else [] }}"
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} ELB ( master internal )"
|
||||
ec2_elb_lb:
|
||||
cross_az_load_balancing: "yes"
|
||||
health_check:
|
||||
ping_protocol: https
|
||||
ping_port: 443
|
||||
ping_path: "/api"
|
||||
response_timeout: 2
|
||||
interval: 5
|
||||
unhealthy_threshold: 2
|
||||
healthy_threshold: 3
|
||||
listeners:
|
||||
- protocol: tcp
|
||||
load_balancer_port: 443
|
||||
instance_protocol: tcp
|
||||
instance_port: 443
|
||||
name: "{{ clusterid }}-master-internal"
|
||||
region: "{{ aws_region }}"
|
||||
scheme: internal
|
||||
security_group_names:
|
||||
- "master"
|
||||
state: "{{ state | default('present') }}"
|
||||
subnets: "{{ item.subnets }}"
|
||||
tags: "{{ {'kubernetes.io/cluster/' ~ clusterid: clusterid, 'clusterid': clusterid} }}"
|
||||
wait: yes
|
||||
retries: 3
|
||||
delay: 3
|
||||
register: elbintmaster
|
||||
with_items:
|
||||
- subnets: "{{ [
|
||||
subnet_private.results.0.subnet.id,
|
||||
subnet_private.results.1.subnet.id,
|
||||
subnet_private.results.2.subnet.id
|
||||
] if (state is undefined or 'absent' not in state) else [] }}"
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} ELB ( infra public )"
|
||||
ec2_elb_lb:
|
||||
cross_az_load_balancing: "yes"
|
||||
health_check:
|
||||
ping_protocol: tcp
|
||||
ping_port: 443
|
||||
response_timeout: 2
|
||||
interval: 5
|
||||
unhealthy_threshold: 2
|
||||
healthy_threshold: 2
|
||||
listeners:
|
||||
- protocol: tcp
|
||||
load_balancer_port: 80
|
||||
instance_protocol: tcp
|
||||
instance_port: 80
|
||||
- protocol: tcp
|
||||
load_balancer_port: 443
|
||||
instance_protocol: tcp
|
||||
instance_port: 443
|
||||
name: "{{ clusterid }}-infra"
|
||||
region: "{{ aws_region }}"
|
||||
scheme: internet-facing
|
||||
security_group_names:
|
||||
- "infra"
|
||||
state: "{{ state | default('present') }}"
|
||||
subnets: "{{ item.subnets }}"
|
||||
tags: "{{ {'kubernetes.io/cluster/' ~ clusterid: clusterid, 'clusterid': clusterid} }}"
|
||||
wait: yes
|
||||
retries: 3
|
||||
delay: 3
|
||||
register: elbextinfra
|
||||
with_items:
|
||||
- subnets: "{{ [
|
||||
subnet_public.results.0.subnet.id,
|
||||
subnet_public.results.1.subnet.id,
|
||||
subnet_public.results.2.subnet.id
|
||||
] if (state is undefined or 'absent' not in state) else [] }}"
|
||||
@@ -0,0 +1,63 @@
|
||||
---
|
||||
- name: Gather Route53 zone facts
|
||||
route53_facts:
|
||||
query: hosted_zone
|
||||
register: r53_zones
|
||||
|
||||
- name: Filter Route53 zones
|
||||
set_fact:
|
||||
r53_zones: ['{% for zone in r53_zones.HostedZones -%}
|
||||
{%- if clusterid ~ ".sysdeseng.com." in zone.Name -%}
|
||||
{%- if zone.Config.PrivateZone == true -%}
|
||||
{ "Id": "{{ zone.Id | safe }}", "Name": "{{ zone.Name | safe }}", "PrivateZone": true },
|
||||
{%- else -%}
|
||||
{ "Id": "{{ zone.Id | safe }}", "Name": "{{ zone.Name | safe }}" , "PrivateZone": false },
|
||||
{%- endif -%}
|
||||
{%- endif -%}
|
||||
{% endfor -%}']
|
||||
register: r53_zones
|
||||
|
||||
- name: Gather Route53 record facts
|
||||
ignore_errors: True
|
||||
route53:
|
||||
command: get
|
||||
private_zone: "{{ item.private_zone }}"
|
||||
record: "{{ item.record }}"
|
||||
type: CNAME
|
||||
zone: "{{ item.zone }}"
|
||||
register: r53_record
|
||||
with_items:
|
||||
- private_zone: no
|
||||
record: "{{ 'master' + '.' + clusterid + '.' + dns_domain }}"
|
||||
type: 'CNAME'
|
||||
zone: "{{ clusterid + '.' + dns_domain }}."
|
||||
- private_zone: no
|
||||
record: "*.apps.{{ clusterid }}.{{ dns_domain }}"
|
||||
type: 'CNAME'
|
||||
zone: "{{ clusterid + '.' + dns_domain }}."
|
||||
- private_zone: yes
|
||||
record: "{{ 'master' + '.' + clusterid + '.' + dns_domain }}"
|
||||
zone: "{{ clusterid + '.' + dns_domain }}."
|
||||
- private_zone: yes
|
||||
record: "*.apps.{{ clusterid }}.{{ dns_domain }}"
|
||||
type: 'CNAME'
|
||||
zone: "{{ clusterid + '.' + dns_domain }}."
|
||||
changed_when: "( r53_record.msg is defined ) and ( 'filler msg to prevent changed task' in r53_record.msg )"
|
||||
failed_when: "( r53_record.msg is defined ) and ( 'filler msg to prevent failed task' in r53_record.msg )"
|
||||
|
||||
- name: Gather VPC facts
|
||||
ec2_vpc_net_facts:
|
||||
filters:
|
||||
tag:Name: "{{ clusterid }}"
|
||||
region: "{{ aws_region }}"
|
||||
register: vpc
|
||||
|
||||
- name: Gather NatGW facts
|
||||
ec2_vpc_nat_gateway_facts:
|
||||
filters: "{{ {
|
||||
'tag:kubernetes.io/cluster/' ~ clusterid: clusterid,
|
||||
'tag:clusterid': clusterid,
|
||||
'state': 'available'
|
||||
} }}"
|
||||
region: "{{ aws_region }}"
|
||||
register: natgws
|
||||
@@ -0,0 +1,10 @@
|
||||
---
- name: "Set fact: availability_zones"
  set_fact:
    vpc_subnet_azs: "{{ lookup('ec2_zones_by_region', creds).split(',') }}"

- debug:
    msg:
      - 'Error in env; the AWS region does not contain at least 3 availability zones. Please pick another region!'
  when: "( vpc_subnet_azs | length | int ) < 3"
  failed_when: "( vpc_subnet_azs | length | int ) < 3"
@@ -0,0 +1,34 @@
|
||||
---
- name: "Retrieve credentials from env vars"
  ignore_errors: true
  no_log: True
  set_fact:
    aws_access_key: "{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
    aws_secret_key: "{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"

- name: "Retrieve credentials from creds file"
  ignore_errors: true
  no_log: True
  set_fact:
    aws_access_key: "{{ lookup('ini', 'aws_access_key_id section={{ aws_cred_profile }} file=~/.aws/credentials') }}"
    aws_secret_key: "{{ lookup('ini', 'aws_secret_access_key section={{ aws_cred_profile }} file=~/.aws/credentials') }}"
  when: ( not aws_access_key ) and ( not aws_secret_key )

- debug:
    msg:
      - 'Error in env; Unable to discover AWS_ACCESS_KEY_ID'
  when: not aws_access_key
  failed_when: not aws_access_key

- debug:
    msg:
      - 'Error in env; Unable to discover AWS_SECRET_ACCESS_KEY'
  when: not aws_secret_key
  failed_when: not aws_secret_key

- name: 'Set fact: arg for lookup queries'
  set_fact:
    creds:
      aws_access_key: "{{ aws_access_key }}"
      aws_secret_key: "{{ aws_secret_key }}"
      aws_region: "{{ aws_region }}"
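The two tasks above only discover credentials; the environment-variable path is tried first and the `~/.aws/credentials` profile named by `aws_cred_profile` is the fallback. A minimal sketch of the environment-variable path, with placeholder key values (the variable names are exactly what the lookup('env', ...) calls read):

    # placeholder values - substitute real credentials, or rely on ~/.aws/credentials instead
    export AWS_ACCESS_KEY_ID=AKIAXXXXXXXXXXXXXXXX
    export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx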
@@ -0,0 +1,34 @@
|
||||
---
- block:
    - name: Fetch Red Hat Cloud Access ami
      shell: aws ec2 describe-images \
             --region "{{ aws_region }}" --owners 309956199498 | \
             jq -r '.Images[] | [.Name,.ImageId] | @csv' | \
             sed -e 's/\"//g' | \
             grep -v Beta | \
             grep -i Access2-GP2 | \
             grep -i "{{ rhel_release }}" | \
             sort | \
             tail -1
      args:
        executable: /bin/bash
      register: ec2ami_unformatted
      changed_when: "'ami-' not in ec2ami_unformatted.stdout"

    - name: 'NOTICE! Red Hat Cloud Access machine image not found'
      vars:
        notice: |
          NOTICE! Red Hat Cloud Access machine image not found!
          Please verify the process has been completed successfully.

          See the following url...
          https://access.redhat.com/cloude/manager/gold_imports/new
      debug:
        msg: "{{ notice.split('\n') }}"
      when: ec2ami_unformatted.changed
      failed_when: "'ami-' not in ec2ami_unformatted.stdout"

    - name: 'Set fact: ec2ami'
      set_fact:
        ec2ami: "{{ ec2ami_unformatted.stdout.split(',')[1] }}"
  when: ec2ami is not defined
@@ -0,0 +1,97 @@
|
||||
---
|
||||
- name: Fetch IAM identity
|
||||
set_fact:
|
||||
iam_identity: "{{ lookup('iam_identity') | replace (',', '') }}"
|
||||
when:
|
||||
- ( state is not defined ) or ( 'absent' not in state )
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} IAM user ( cloudprovider_kind )"
|
||||
iam:
|
||||
iam_type: user
|
||||
name: "{{ clusterid }}.{{ dns_domain }}-admin"
|
||||
state: "{{ state | default('present') }}"
|
||||
access_key_state: create
|
||||
register: cpkuser
|
||||
|
||||
- name: "Pause for 15s to allow for IAM to instantiate"
|
||||
pause:
|
||||
seconds: 15
|
||||
when:
|
||||
- ( state is not defined ) or ( 'absent' not in state )
|
||||
- cpkuser.changed
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} IAM user access key file ( cloudprovider_kind )"
|
||||
file:
|
||||
path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-cpkuser_access_key"
|
||||
state: "{{ state | default('present') }}"
|
||||
when:
|
||||
- ( state is defined ) and ( 'absent' in state )
|
||||
- cpkuser.changed
|
||||
|
||||
- name: IAM cloudprovider_kind access key content
|
||||
blockinfile:
|
||||
path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-cpkuser_access_key"
|
||||
create: yes
|
||||
marker: "<!-- {mark} OUTPUT -->"
|
||||
state: "{{ state | default('present') }}"
|
||||
content: |
|
||||
openshift_cloudprovider_aws_access_key={{ cpkuser.user_meta.access_keys[0].access_key_id }}
|
||||
openshift_cloudprovider_aws_secret_key={{ cpkuser.user_meta.access_keys[0].secret_access_key }}
|
||||
when:
|
||||
- ( state is not defined ) or ( 'absent' not in state )
|
||||
- cpkuser.changed
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} IAM user policy ( cloudprovider_kind )"
|
||||
iam_policy:
|
||||
iam_type: user
|
||||
iam_name: "{{ clusterid }}.{{ dns_domain }}-admin"
|
||||
policy_name: "Admin"
|
||||
state: "{{ state | default('present') }}"
|
||||
policy_json: "{{ lookup('template', playbook_dir + '/roles/aws/templates/iam_policy_cpkuser.json.j2') }}"
|
||||
when:
|
||||
- ( state is not defined ) or ( 'absent' not in state )
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} IAM user ( hosted registry )"
|
||||
iam:
|
||||
iam_type: user
|
||||
name: "{{ clusterid }}.{{ dns_domain }}-registry"
|
||||
state: "{{ state | default('present') }}"
|
||||
access_key_state: create
|
||||
register: s3user
|
||||
|
||||
- name: "Pause for 15s to allow for IAM to instantiate"
|
||||
pause:
|
||||
seconds: 15
|
||||
when:
|
||||
- ( state is not defined ) or ( 'absent' not in state )
|
||||
- s3user.changed
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} IAM user access key file ( hosted registry )"
|
||||
file:
|
||||
path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-s3user_access_key"
|
||||
state: "{{ state | default('present') }}"
|
||||
when:
|
||||
- ( state is defined ) and ( 'absent' in state )
|
||||
- s3user.changed
|
||||
|
||||
- name: IAM s3user access key content
|
||||
blockinfile:
|
||||
path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-s3user_access_key"
|
||||
create: yes
|
||||
marker: "<!-- {mark} OUTPUT -->"
|
||||
content: |
|
||||
openshift_hosted_registry_storage_s3_accesskey={{ s3user.user_meta.access_keys[0].access_key_id }}
|
||||
openshift_hosted_registry_storage_s3_secretkey={{ s3user.user_meta.access_keys[0].secret_access_key }}
|
||||
when:
|
||||
- ( state is not defined ) or ( 'absent' not in state )
|
||||
- s3user.changed
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} IAM user policy ( hosted registry )"
|
||||
iam_policy:
|
||||
iam_type: user
|
||||
iam_name: "{{ clusterid }}.{{ dns_domain }}-registry"
|
||||
policy_name: "S3"
|
||||
state: "{{ state | default('present') }}"
|
||||
policy_json: "{{ lookup('template', playbook_dir + '/roles/aws/templates/iam_policy_s3user.json.j2') }}"
|
||||
when:
|
||||
- ( state is undefined ) or ( 'absent' not in state )
|
||||
@@ -0,0 +1,10 @@
|
||||
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} IGW"
  ec2_vpc_igw:
    region: "{{ aws_region }}"
    state: "{{ state | default('present') }}"
    vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
  retries: 3
  delay: 3
  register: igw
  when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
@@ -0,0 +1,77 @@
|
||||
---
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} NAT Gateway"
|
||||
ec2_vpc_nat_gateway:
|
||||
if_exist_do_not_create: yes
|
||||
nat_gateway_id: "{{ '' if (state is undefined or 'absent' not in state) else natgws.result[0].nat_gateway_id }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: "{{ state | default('present') }}"
|
||||
subnet_id: "{{ subnet_public.results.0.subnet.id if (state is undefined or 'absent' not in state) else '' }}"
|
||||
wait: yes
|
||||
register: natgw1
|
||||
retries: 3
|
||||
delay: 3
|
||||
when: "\
|
||||
( ( subnet_public is defined ) and ( subnet_public.results.0 is defined ) ) \
|
||||
or \
|
||||
( natgws.result[0] is defined )"
|
||||
|
||||
- name: "Pause for 30 to allow for AWS to {{ 'instantiate' if (state is undefined or 'absent' not in state) else 'terminate' }}"
|
||||
pause:
|
||||
seconds: 30
|
||||
when: natgw1 is changed
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} NAT Gateway"
|
||||
ec2_vpc_nat_gateway:
|
||||
if_exist_do_not_create: yes
|
||||
nat_gateway_id: "{{ '' if (state is undefined or 'absent' not in state) else natgws.result[1].nat_gateway_id }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: "{{ state | default('present') }}"
|
||||
subnet_id: "{{ subnet_public.results.1.subnet.id if (state is undefined or 'absent' not in state) else '' }}"
|
||||
wait: yes
|
||||
register: natgw2
|
||||
retries: 3
|
||||
delay: 3
|
||||
when: "\
|
||||
( ( subnet_public is defined ) and ( subnet_public.results.1 is defined ) ) \
|
||||
or \
|
||||
( natgws.result[1] is defined )"
|
||||
|
||||
- name: "Pause for 30 to allow for AWS to {{ 'instantiate' if (state is undefined or 'absent' not in state) else 'terminate' }}"
|
||||
pause:
|
||||
seconds: 30
|
||||
when: natgw2 is changed
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} NAT Gateway"
|
||||
ec2_vpc_nat_gateway:
|
||||
if_exist_do_not_create: yes
|
||||
nat_gateway_id: "{{ '' if (state is undefined or 'absent' not in state) else natgws.result[2].nat_gateway_id }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: "{{ state | default('present') }}"
|
||||
subnet_id: "{{ subnet_public.results.2.subnet.id if (state is undefined or 'absent' not in state) else '' }}"
|
||||
wait: yes
|
||||
register: natgw3
|
||||
retries: 3
|
||||
delay: 3
|
||||
when: "\
|
||||
( ( subnet_public is defined ) and ( subnet_public.results.2 is defined ) ) \
|
||||
or \
|
||||
( natgws.result[2] is defined )"
|
||||
|
||||
- name: "Pause for 30 to allow for AWS to {{ 'instantiate' if (state is undefined or 'absent' not in state) else 'terminate' }}"
|
||||
pause:
|
||||
seconds: 30
|
||||
when: natgw3 is changed
|
||||
|
||||
- include_tasks: tag.yaml
|
||||
vars:
|
||||
- resource: "{{ item.nat.nat_gateway_id }}"
|
||||
- tagss: "Name={{ item.az }}, clusterid={{ clusterid }}, kubernetes.io/cluster/{{ clusterid }}={{ clusterid }}"
|
||||
with_items:
|
||||
- az: "{{ vpc_subnet_azs.0 }}"
|
||||
nat: "{{ natgw1 }}"
|
||||
- az: "{{ vpc_subnet_azs.1 }}"
|
||||
nat: "{{ natgw2 }}"
|
||||
- az: "{{ vpc_subnet_azs.2 }}"
|
||||
nat: "{{ natgw3 }}"
|
||||
when:
|
||||
- ( state is undefined ) or ( 'absent' not in state )
|
||||
@@ -0,0 +1,27 @@
|
||||
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} Route53 zone"
  route53_zone:
    comment: "{{ clusterid }}.{{ dns_domain }}"
    hosted_zone_id: "{{ '' if (state is undefined or 'absent' not in state) else item.id | replace('/hostedzone/', '') }}"
    state: "{{ state | default('present') }}"
    vpc_id: "{{ item.vpc_id }}"
    vpc_region: "{{ aws_region }}"
    zone: "{{ clusterid }}.{{ dns_domain }}."
  with_items: "[\
    {%- if r53_zones is defined and r53_zones[0] | length >= 1 -%}
      {% for zone in r53_zones[0] -%}
        {%- if zone.PrivateZone == true -%}
          { 'id': '{{ zone.Id | safe }}', 'vpc_id': '{{ vpc.vpcs[0].vpc_id }}' },
        {%- else -%}
          { 'id': '{{ zone.Id | safe }}', 'vpc_id': '' },
        {%- endif -%}
      {% endfor -%}
    {%- else -%}
      { 'id': '', 'vpc_id': '' },
      { 'id': '', 'vpc_id': '{{ vpc.vpc.id }}' },
    {%- endif -%}]"
  when: "\
    (state is undefined or 'absent' not in state) \
    or \
    ( ( r53_zones is defined ) and ( r53_zones[0] | length >= 1 ) ) \
    "
@@ -0,0 +1,68 @@
|
||||
---
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} Route53 record"
|
||||
route53:
|
||||
command: "{{ 'create' if (state is undefined or 'absent' not in state) else 'delete' }}"
|
||||
overwrite: yes
|
||||
private_zone: "no"
|
||||
record: "{{ 'master' + '.' + clusterid + '.' + dns_domain }}"
|
||||
type: CNAME
|
||||
ttl: 300
|
||||
value: "{{ elbextmaster.results[0].elb.dns_name if (state is undefined or 'absent' not in state) else r53_record.results[0].set.value }}"
|
||||
zone: "{{ clusterid + '.' + dns_domain }}."
|
||||
when: "\
|
||||
( ( elbextmaster is defined ) and ( elbextmaster.results[0] is defined ) and ( elbextmaster.results[0].elb.name is defined ) ) \
|
||||
or \
|
||||
( ( r53_record.results[0].set is defined ) and ( r53_record.results[0].set | length > 0 ) ) \
|
||||
or \
|
||||
( ( r53_record.results[0].msg is defined ) and ( 'Zone ' + clusterid + '.' + dns_domain + '.' + ' does not exist in Route53' not in r53_record.results[0].msg ) )"
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} Route53 record"
|
||||
route53:
|
||||
command: "{{ 'create' if (state is undefined or 'absent' not in state) else 'delete' }}"
|
||||
overwrite: yes
|
||||
private_zone: "no"
|
||||
record: "*.apps.{{ clusterid }}.{{ dns_domain }}"
|
||||
type: CNAME
|
||||
ttl: 300
|
||||
value: "{{ elbextinfra.results[0].elb.dns_name if (state is undefined or 'absent' not in state) else r53_record.results[1].set.value }}"
|
||||
zone: "{{ clusterid + '.' + dns_domain }}."
|
||||
when: "\
|
||||
( ( elbextinfra is defined ) and ( elbextinfra.results[0] is defined ) and ( elbextinfra.results[0].elb.name is defined ) ) \
|
||||
or \
|
||||
( ( r53_record.results[1].set is defined ) and ( r53_record.results[1].set | length > 0 ) ) \
|
||||
or \
|
||||
( ( r53_record.results[1].msg is defined ) and ( 'Zone ' + clusterid + '.' + dns_domain + '.' + ' does not exist in Route53' not in r53_record.results[1].msg ) )"
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} Route53 record"
|
||||
route53:
|
||||
command: "{{ 'create' if (state is undefined or 'absent' not in state) else 'delete' }}"
|
||||
overwrite: yes
|
||||
private_zone: "yes"
|
||||
record: "{{ 'master' + '.' + clusterid + '.' + dns_domain }}"
|
||||
type: CNAME
|
||||
ttl: 300
|
||||
value: "{{ elbintmaster.results[0].elb.dns_name if (state is undefined or 'absent' not in state) else r53_record.results[2].set.value }}"
|
||||
zone: "{{ clusterid + '.' + dns_domain }}."
|
||||
when: "\
|
||||
( ( elbintmaster is defined ) and ( elbintmaster.results[0] is defined ) and ( elbintmaster.results[0].elb.name is defined ) ) \
|
||||
or \
|
||||
( ( r53_record.results[2].set is defined ) and ( r53_record.results[2].set | length > 0 ) ) \
|
||||
or \
|
||||
( ( r53_record.results[2].msg is defined ) and ( 'Zone ' + clusterid + '.' + dns_domain + '.' + ' does not exist in Route53' not in r53_record.results[2].msg ) )"
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} Route53 record"
|
||||
route53:
|
||||
command: "{{ 'create' if (state is undefined or 'absent' not in state) else 'delete' }}"
|
||||
overwrite: "yes"
|
||||
private_zone: "yes"
|
||||
record: "*.apps.{{ clusterid }}.{{ dns_domain }}"
|
||||
type: CNAME
|
||||
ttl: 300
|
||||
value: "{{ elbextinfra.results[0].elb.dns_name if (state is undefined or 'absent' not in state) else r53_record.results[3].set.value }}"
|
||||
zone: "{{ clusterid + '.' + dns_domain }}."
|
||||
when: "\
|
||||
( ( elbextinfra is defined ) and ( elbextinfra.results[0] is defined ) and ( elbextinfra.results[0].elb.name is defined ) ) \
|
||||
or \
|
||||
( ( r53_record.results[3].set is defined ) and ( r53_record.results[3].set | length > 0 ) ) \
|
||||
or \
|
||||
( ( r53_record.results[3].msg is defined ) and ( 'Zone ' + clusterid + '.' + dns_domain + '.' + ' does not exist in Route53' not in r53_record.results[3].msg ) )"
|
||||
@@ -0,0 +1,18 @@
|
||||
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} RouteTable"
  ec2_vpc_route_table:
    purge_routes: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
    purge_subnets: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
    region: "{{ aws_region }}"
    state: "{{ state | default('present') }}"
    tags:
      Name: "{{ item.name }}"
    vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
  retries: 3
  delay: 3
  when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
  with_items:
    - name: routing
    - name: "{{ vpc_subnet_azs.0 }}"
    - name: "{{ vpc_subnet_azs.1 }}"
    - name: "{{ vpc_subnet_azs.2 }}"
@@ -0,0 +1,35 @@
|
||||
---
- name: Create RouteTable rules
  ec2_vpc_route_table:
    region: "{{ aws_region }}"
    subnets: "{{ item.subnets }}"
    routes:
      - dest: 0.0.0.0/0
        gateway_id: "{{ item.gw }}"
    state: "{{ state | default('present') }}"
    tags:
      Name: "{{ item.name }}"
    vpc_id: "{{ vpc.vpc.id }}"
  retries: 3
  delay: 5
  with_items:
    - name: routing
      subnets:
        - "{{ subnet_public.results.0.subnet.id }}"
        - "{{ subnet_public.results.1.subnet.id }}"
        - "{{ subnet_public.results.2.subnet.id }}"
      gw: "{{ igw.gateway_id | default('') }}"
    - name: "{{ vpc_subnet_azs.0 }}"
      subnets:
        - "{{ subnet_private.results.0.subnet.id }}"
      gw: "{{ natgw1.nat_gateway_id }}"
    - name: "{{ vpc_subnet_azs.1 }}"
      subnets:
        - "{{ subnet_private.results.1.subnet.id }}"
      gw: "{{ natgw2.nat_gateway_id }}"
    - name: "{{ vpc_subnet_azs.2 }}"
      subnets:
        - "{{ subnet_private.results.2.subnet.id }}"
      gw: "{{ natgw3.nat_gateway_id }}"
  when:
    - ( state is not defined ) or ( 'absent' not in state )
10
reference-architecture/3.9/playbooks/roles/aws/tasks/s3.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} S3 bucket ( hosted registry )"
  s3_bucket:
    name: "{{ clusterid }}.{{ dns_domain }}-registry"
    region: "{{ aws_region }}"
    state: "{{ state | default('present') }}"
    tags:
      Clusterid: "{{ clusterid }}"
  retries: 3
  delay: 3
@@ -0,0 +1,9 @@
|
||||
---
- name: "Create S3 bucket policy ( hosted registry )"
  s3_bucket:
    name: "{{ clusterid }}.{{ dns_domain }}-registry"
    policy: "{{ lookup('template', playbook_dir + '/roles/aws/templates/s3_bucket_policy_registry.json', convert_data=False) | string }}"
    region: "{{ aws_region }}"
    state: "{{ state | default('present') }}"
  retries: 3
  delay: 3
@@ -0,0 +1,16 @@
|
||||
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} SecurityGroups"
  ec2_group:
    name: "{{ item.name }}"
    description: "{{ item.name }}"
    purge_rules: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
    purge_rules_egress: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
    region: "{{ aws_region }}"
    state: "{{ state | default('present') }}"
    vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
  when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
  with_items:
    - name: "node"
    - name: "master"
    - name: "infra"
    - name: "bastion"
@@ -0,0 +1,13 @@
|
||||
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} SecurityGroup (CNS)"
  ec2_group:
    name: "{{ item.name }}"
    description: "{{ item.name }}"
    purge_rules: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
    purge_rules_egress: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
    region: "{{ aws_region }}"
    state: "{{ state | default('present') }}"
    vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
  when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
  with_items:
    - name: "cns"
@@ -0,0 +1,145 @@
|
||||
---
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} SecurityGroup rules"
|
||||
ec2_group:
|
||||
name: "{{ item.name }}"
|
||||
description: "{{ item.name }}"
|
||||
purge_rules: "{{ false if (state is undefined or 'absent' not in state) else true }}"
|
||||
purge_rules_egress: "{{ false if (state is undefined or 'absent' not in state) else true }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: "{{ state | default('present') }}"
|
||||
vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
|
||||
rules: "{{ item.rules }}"
|
||||
when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
|
||||
with_items:
|
||||
- name: master
|
||||
rules: "{{ [
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '443',
|
||||
'to_port': '443',
|
||||
'cidr_ip': '0.0.0.0/0'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '2379',
|
||||
'to_port': '2380',
|
||||
'group_name': 'master'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '2379',
|
||||
'to_port': '2380',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '24224',
|
||||
'to_port': '24224',
|
||||
'group_name': 'master'
|
||||
},
|
||||
{
|
||||
'proto': 'udp',
|
||||
'from_port': '24224',
|
||||
'to_port': '24224',
|
||||
'group_name': 'master'
|
||||
}
|
||||
] if (state is undefined or 'absent' not in state) else '' }}"
|
||||
- name: node
|
||||
rules: "{{ [
|
||||
{
|
||||
'proto': 'icmp',
|
||||
'from_port': '8',
|
||||
'to_port': '-1',
|
||||
'cidr_ip': '0.0.0.0/0'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '22',
|
||||
'to_port': '22',
|
||||
'group_name': 'bastion'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '53',
|
||||
'to_port': '53',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '2049',
|
||||
'to_port': '2049',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '8053',
|
||||
'to_port': '8053',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '10250',
|
||||
'to_port': '10250',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'udp',
|
||||
'from_port': '53',
|
||||
'to_port': '53',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'udp',
|
||||
'from_port': '4789',
|
||||
'to_port': '4789',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'udp',
|
||||
'from_port': '8053',
|
||||
'to_port': '8053',
|
||||
'group_name': 'node'
|
||||
}
|
||||
] if (state is undefined or 'absent' not in state) else '' }}"
|
||||
- name: infra
|
||||
rules: "{{ [
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '80',
|
||||
'to_port': '80',
|
||||
'cidr_ip': '0.0.0.0/0'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '443',
|
||||
'to_port': '443',
|
||||
'cidr_ip': '0.0.0.0/0'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '9200',
|
||||
'to_port': '9200',
|
||||
'cidr_ip': '0.0.0.0/0'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '9300',
|
||||
'to_port': '9300',
|
||||
'cidr_ip': '0.0.0.0/0'
|
||||
}
|
||||
] if (state is undefined or 'absent' not in state) else '' }}"
|
||||
- name: bastion
|
||||
rules: "{{ [
|
||||
{
|
||||
'proto': 'icmp',
|
||||
'from_port': '8',
|
||||
'to_port': '-1',
|
||||
'cidr_ip': '0.0.0.0/0'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '22',
|
||||
'to_port': '22',
|
||||
'cidr_ip': '0.0.0.0/0'
|
||||
}
|
||||
] if (state is undefined or 'absent' not in state) else '' }}"
|
||||
@@ -0,0 +1,82 @@
|
||||
---
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} SecurityGroup (CNS) rules"
|
||||
ec2_group:
|
||||
name: "{{ item.name }}"
|
||||
description: "{{ item.name }}"
|
||||
purge_rules: "{{ false if (state is undefined or 'absent' not in state) else true }}"
|
||||
purge_rules_egress: "{{ false if (state is undefined or 'absent' not in state) else true }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: "{{ state | default('present') }}"
|
||||
vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
|
||||
rules: "{{ item.rules }}"
|
||||
when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
|
||||
with_items:
|
||||
- name: cns
|
||||
rules: "{{ [
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '111',
|
||||
'to_port': '111',
|
||||
'group_name': 'cns'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '2222',
|
||||
'to_port': '2222',
|
||||
'group_name': 'cns'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '3260',
|
||||
'to_port': '3260',
|
||||
'group_name': 'cns'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '3260',
|
||||
'to_port': '3260',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '24007',
|
||||
'to_port': '24008',
|
||||
'group_name': 'cns'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '24007',
|
||||
'to_port': '24008',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '24010',
|
||||
'to_port': '24010',
|
||||
'group_name': 'cns'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '24010',
|
||||
'to_port': '24010',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '49152',
|
||||
'to_port': '49664',
|
||||
'group_name': 'cns'
|
||||
},
|
||||
{
|
||||
'proto': 'tcp',
|
||||
'from_port': '49152',
|
||||
'to_port': '49664',
|
||||
'group_name': 'node'
|
||||
},
|
||||
{
|
||||
'proto': 'udp',
|
||||
'from_port': '111',
|
||||
'to_port': '111',
|
||||
'group_name': 'cns'
|
||||
}
|
||||
] if (state is undefined or 'absent' not in state) else '' }}"
|
||||
@@ -0,0 +1,24 @@
|
||||
---
- name: "Ensure SSH key directory exists"
  file:
    path: "~/.ssh"
    state: directory

- name: "Stat clusterid SSH key"
  stat:
    path: "~/.ssh/{{ clusterid }}.{{ dns_domain }}"
  register: sshkey

- name: "{{ 'Generate' if (state is undefined or 'absent' not in state) else 'Terminate' }} clusterid SSH key"
  shell: "{{ cmd }}"
  vars:
    cmd: "{{ \
      'ssh-keygen -b 2048 -t rsa -f ~/.ssh/' + clusterid + '.' + dns_domain + ' -q -C user@' + dns_domain + ' -N ' + sshkey_password + ' -q' \
      if (state is undefined or 'absent' not in state) else \
      'rm -rf ~/.ssh/' + clusterid + '.' + dns_domain + '*'
      }}"
  when: "\
    ( ( state is undefined or 'absent' not in state ) and sshkey.stat.exists == false ) \
    or \
    ( ( state is defined and 'absent' in state ) and sshkey.stat.exists == true ) \
    "
@@ -0,0 +1,38 @@
|
||||
---
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} Subnet ( public )"
|
||||
ec2_vpc_subnet:
|
||||
az: "{{ item.az }}"
|
||||
cidr: "{{ item.cidr }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: "{{ state | default('present') }}"
|
||||
vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
|
||||
with_items:
|
||||
- cidr: "{{ subnets_public_cidr.0 }}"
|
||||
az: "{{ vpc_subnet_azs.0 }}"
|
||||
- cidr: "{{ subnets_public_cidr.1 }}"
|
||||
az: "{{ vpc_subnet_azs.1 }}"
|
||||
- cidr: "{{ subnets_public_cidr.2 }}"
|
||||
az: "{{ vpc_subnet_azs.2 }}"
|
||||
retries: 99
|
||||
delay: 99
|
||||
register: subnet_public
|
||||
when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
|
||||
|
||||
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} Subnet ( private )"
|
||||
ec2_vpc_subnet:
|
||||
az: "{{ item.az }}"
|
||||
cidr: "{{ item.cidr }}"
|
||||
region: "{{ aws_region }}"
|
||||
state: "{{ state | default('present') }}"
|
||||
vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
|
||||
with_items:
|
||||
- cidr: "{{ subnets_private_cidr.0 }}"
|
||||
az: "{{ vpc_subnet_azs.0 }}"
|
||||
- cidr: "{{ subnets_private_cidr.1 }}"
|
||||
az: "{{ vpc_subnet_azs.1 }}"
|
||||
- cidr: "{{ subnets_private_cidr.2 }}"
|
||||
az: "{{ vpc_subnet_azs.2 }}"
|
||||
retries: 99
|
||||
delay: 99
|
||||
register: subnet_private
|
||||
when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
|
||||
@@ -0,0 +1,13 @@
|
||||
---
#How to initialize me: Add the following to your role
#- import_task: roles/aws/tasks/aws-tag.yaml
#  vars:
#    - resource: "{{ vpcdhcpopts.dhcp_options_id }}"
#    - tagss: "Key1=Value1, Key2={{ var2 }}, string/{{ var3 }}={{ var3 }}"

- name: Create tag
  ec2_tag:
    resource: "{{ resource }}"
    region: "{{ aws_region }}"
    state: present
    tags: "{{ tagss }}"
@@ -0,0 +1,11 @@
|
||||
---
- name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} VPC"
  ec2_vpc_net:
    cidr_block: "{{ vpc_cidr }}"
    dhcp_opts_id: "{{ vpcdhcpopts.dhcp_options_id if (state is undefined or 'absent' not in state) else '' }}"
    name: "{{ clusterid }}"
    region: "{{ aws_region }}"
    state: "{{ state | default('present') }}"
  retries: 3
  delay: 5
  register: vpc
@@ -0,0 +1,11 @@
|
||||
---
- name: "Create VPC DHCP Options"
  ec2_vpc_dhcp_options:
    domain_name: "{{ 'ec2.internal' if (aws_region == 'us-east-1') else aws_region + '.compute.internal' }}"
    region: "{{ aws_region }}"
    dns_servers:
      - AmazonProvidedDNS
    inherit_existing: False
  retries: 3
  delay: 3
  register: vpcdhcpopts
@@ -0,0 +1,3 @@
|
||||
{% for n in ns %}
|
||||
{{ n }}
|
||||
{% endfor %}
|
||||
@@ -0,0 +1,87 @@
|
||||
#cloud-config
|
||||
cloud_config_modules:
|
||||
{% if 'bastion' in item.name %}
|
||||
- package-update-upgrade-install
|
||||
{% else %}
|
||||
- package-update-upgrade-install
|
||||
- disk_setup
|
||||
- mounts
|
||||
- cc_write_files
|
||||
{% endif %}
|
||||
|
||||
packages:
|
||||
{% if 'bastion' in item.name -%}
|
||||
- nmap-ncat
|
||||
{% else -%}
|
||||
- lvm2
|
||||
{% endif %}
|
||||
{% if 'bastion' not in item.name %}
|
||||
|
||||
write_files:
|
||||
- content: |
|
||||
STORAGE_DRIVER=overlay2
|
||||
DEVS=/dev/{%
|
||||
if 'c5' in item.type %}
|
||||
nvme1n1 {%
|
||||
elif 'c5d' in item.type %}
|
||||
nvme1n1 {%
|
||||
elif 'm5' in item.type %}
|
||||
nvme1n1 {%
|
||||
elif 'i3.metal' in item.type %}
|
||||
nvme1n1 {%
|
||||
else %}
|
||||
xvdb {%
|
||||
endif %}
|
||||
|
||||
VG=dockervg
|
||||
CONTAINER_ROOT_LV_NAME=dockerlv
|
||||
CONTAINER_ROOT_LV_MOUNT_PATH=/var/lib/docker
|
||||
CONTAINER_ROOT_LV_SIZE=100%FREE
|
||||
path: "/etc/sysconfig/docker-storage-setup"
|
||||
permissions: "0644"
|
||||
owner: "root"
|
||||
|
||||
fs_setup:
|
||||
- label: ocp_emptydir
|
||||
filesystem: xfs
|
||||
device: /dev/{%
|
||||
if 'c5' in item.type %}
|
||||
nvme2n1 {%
|
||||
elif 'c5d' in item.type %}
|
||||
nvme2n1 {%
|
||||
elif 'm5' in item.type %}
|
||||
nvme2n1 {%
|
||||
elif 'i3.metal' in item.type %}
|
||||
nvme2n1 {%
|
||||
else %}
|
||||
xvdc{%
|
||||
endif %}
|
||||
|
||||
partition: auto{%
|
||||
if 'master' in item.name %}
|
||||
|
||||
- label: etcd
|
||||
filesystem: xfs
|
||||
device: /dev/{%
|
||||
if 'c5' in item.type %}
|
||||
nvme3n1 {%
|
||||
elif 'c5d' in item.type %}
|
||||
nvme3n1 {%
|
||||
elif 'm5' in item.type %}
|
||||
nvme3n1 {%
|
||||
elif 'i3.metal' in item.type %}
|
||||
nvme3n1 {%
|
||||
else %}
|
||||
xvdd{%
|
||||
endif %}
|
||||
|
||||
partition: auto
|
||||
{% endif %}
|
||||
|
||||
mounts:
|
||||
- [ "LABEL=ocp_emptydir", "/var/lib/origin/openshift.local.volumes", xfs, "defaults,gquota" ]{%
|
||||
if 'master' in item.name %}
|
||||
|
||||
- [ "LABEL=etcd", "/var/lib/etcd", xfs, "defaults,gquota" ]
|
||||
{% endif %}
|
||||
{%- endif %}
|
||||
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"ec2:DescribeVolumes",
|
||||
"ec2:CreateVolume",
|
||||
"ec2:CreateTags",
|
||||
"ec2:DescribeInstances",
|
||||
"ec2:AttachVolume",
|
||||
"ec2:DetachVolume",
|
||||
"ec2:DeleteVolume",
|
||||
"ec2:DescribeSubnets",
|
||||
"ec2:CreateSecurityGroup",
|
||||
"ec2:DeleteSecurityGroup",
|
||||
"ec2:DescribeSecurityGroups",
|
||||
"ec2:DescribeRouteTables",
|
||||
"ec2:AuthorizeSecurityGroupIngress",
|
||||
"ec2:RevokeSecurityGroupIngress",
|
||||
"elasticloadbalancing:DescribeTags",
|
||||
"elasticloadbalancing:CreateLoadBalancerListeners",
|
||||
"elasticloadbalancing:ConfigureHealthCheck",
|
||||
"elasticloadbalancing:DeleteLoadBalancerListeners",
|
||||
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
|
||||
"elasticloadbalancing:DescribeLoadBalancers",
|
||||
"elasticloadbalancing:CreateLoadBalancer",
|
||||
"elasticloadbalancing:DeleteLoadBalancer",
|
||||
"elasticloadbalancing:ModifyLoadBalancerAttributes",
|
||||
"elasticloadbalancing:DescribeLoadBalancerAttributes"
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow",
|
||||
"Sid": "1"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"s3:*"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::{{ clusterid }}.{{ dns_domain }}-registry",
|
||||
"arn:aws:s3:::{{ clusterid }}.{{ dns_domain }}-registry/*"
|
||||
],
|
||||
"Effect": "Allow",
|
||||
"Sid": "1"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
openshift_cloudprovider_kind=aws
|
||||
openshift_clusterid={{ clusterid }}
|
||||
{{ lookup('file', (
|
||||
'~/.ssh/config-' +
|
||||
clusterid +
|
||||
'.' +
|
||||
dns_domain +
|
||||
'-cpkuser_access_key'
|
||||
) ) |
|
||||
regex_replace(".* OUTPUT .*", '') |
|
||||
trim }}
|
||||
@@ -0,0 +1,3 @@
|
||||
{% for n in ns %}
|
||||
{{ n }}
|
||||
{% endfor %}
|
||||
@@ -0,0 +1,19 @@
|
||||
[masters]
|
||||
{%- for i in ec2master.results %}
|
||||
{{ i.tagged_instances[0].private_dns_name }} openshift_node_labels="{'region': 'master'}"
|
||||
{%- endfor %}
|
||||
|
||||
[etcd]
|
||||
|
||||
[etcd:children]
|
||||
masters
|
||||
|
||||
[nodes]
|
||||
{% for i in ec2node.results -%}
|
||||
{{ i.tagged_instances[0].private_dns_name }} openshift_node_labels="{'region': 'apps'}"
|
||||
{% endfor -%}
|
||||
{% for i in ec2infra.results -%}
|
||||
{{ i.tagged_instances[0].private_dns_name }} openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
|
||||
{% endfor %}
|
||||
[nodes:children]
|
||||
masters
|
||||
@@ -0,0 +1,3 @@
|
||||
{% for i in ec2cns.results %}
|
||||
{{ i.tagged_instances[0].private_dns_name }} openshift_schedulable=True
|
||||
{% endfor %}
|
||||
@@ -0,0 +1,4 @@
|
||||
[glusterfs]
|
||||
{% for i in ec2cns.results %}
|
||||
{{ i.tagged_instances[0].private_dns_name }} glusterfs_devices='[ "/dev/nvme3n1" ]'
|
||||
{% endfor %}
|
||||
@@ -0,0 +1,19 @@
|
||||
openshift_hosted_manage_registry=true
|
||||
openshift_hosted_registry_storage_kind=object
|
||||
openshift_hosted_registry_storage_provider=s3
|
||||
{{ lookup('file', ( '~/.ssh/config-' +
|
||||
clusterid +
|
||||
'.' +
|
||||
dns_domain +
|
||||
'-s3user_access_key') ) |
|
||||
regex_replace(".* OUTPUT .*", '') |
|
||||
trim }}
|
||||
openshift_hosted_registry_storage_s3_bucket={{ clusterid }}.{{ dns_domain }}-registry
|
||||
openshift_hosted_registry_storage_s3_region={{ aws_region }}
|
||||
openshift_hosted_registry_storage_s3_chunksize=26214400
|
||||
openshift_hosted_registry_storage_s3_rootdirectory=/registry
|
||||
openshift_hosted_registry_pullthrough=true
|
||||
openshift_hosted_registry_acceptschema2=true
|
||||
openshift_hosted_registry_enforcequota=true
|
||||
openshift_hosted_registry_replicas=3
|
||||
openshift_hosted_registry_selector='region=infra'
|
||||
@@ -0,0 +1,3 @@
|
||||
openshift_master_default_subdomain=apps.{{ clusterid }}.{{ dns_domain }}
|
||||
openshift_master_cluster_hostname=master.{{ clusterid }}.{{ dns_domain }}
|
||||
openshift_master_cluster_public_hostname=master.{{ clusterid }}.{{ dns_domain }}
|
||||
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "1",
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"AWS": "arn:aws:iam::{{ iam_identity }}:user/{{ clusterid }}.{{ dns_domain }}-registry"
|
||||
},
|
||||
"Action": "s3:*",
|
||||
"Resource": "arn:aws:s3:::{{ clusterid }}.{{ dns_domain }}-registry"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,30 @@
|
||||
Host bastion
|
||||
HostName {{ eipbastion.public_ip }}
|
||||
User ec2-user
|
||||
StrictHostKeyChecking no
|
||||
ProxyCommand none
|
||||
CheckHostIP no
|
||||
ForwardAgent yes
|
||||
ServerAliveInterval 15
|
||||
TCPKeepAlive yes
|
||||
ControlMaster auto
|
||||
ControlPath ~/.ssh/mux-%r@%h:%p
|
||||
ControlPersist 15m
|
||||
ServerAliveInterval 30
|
||||
IdentityFile ~/.ssh/{{ clusterid }}.{{ dns_domain }}
|
||||
|
||||
Host *.compute.internal
|
||||
ProxyCommand ssh -W %h:%p bastion
|
||||
user ec2-user
|
||||
StrictHostKeyChecking no
|
||||
CheckHostIP no
|
||||
ServerAliveInterval 30
|
||||
IdentityFile ~/.ssh/{{ clusterid }}.{{ dns_domain }}
|
||||
|
||||
Host *.ec2.internal
|
||||
ProxyCommand ssh -W %h:%p bastion
|
||||
user ec2-user
|
||||
StrictHostKeyChecking no
|
||||
CheckHostIP no
|
||||
ServerAliveInterval 30
|
||||
IdentityFile ~/.ssh/{{ clusterid }}.{{ dns_domain }}
|
||||
32
reference-architecture/3.9/playbooks/undeploy_aws.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
---
- hosts: localhost
  become: False
  vars:
    - state: 'absent'
  vars_files:
    - vars/main.yaml
  tasks:
    - import_tasks: roles/aws/tasks/getcreds.yaml
    - import_tasks: roles/aws/tasks/getazs.yaml
    - import_tasks: roles/aws/tasks/getec2ami.yaml
    - import_tasks: roles/aws/tasks/gather_facts.yaml
    - import_tasks: roles/aws/tasks/sshkeys.yaml
    - import_tasks: roles/aws/tasks/ec2keypair.yaml
    - import_tasks: roles/aws/tasks/route53record.yaml
    - import_tasks: roles/aws/tasks/route53.yaml
    - import_tasks: roles/aws/tasks/iam.yaml
    - import_tasks: roles/aws/tasks/s3.yaml
    - import_tasks: roles/aws/tasks/natgw.yaml
    - import_tasks: roles/aws/tasks/elb.yaml
    - import_tasks: roles/aws/tasks/ec2_cns.yaml
    - import_tasks: roles/aws/tasks/ec2.yaml
    - import_tasks: roles/aws/tasks/securitygrouprule_cns.yaml
    - import_tasks: roles/aws/tasks/securitygroup_cns.yaml
    - import_tasks: roles/aws/tasks/securitygrouprule.yaml
    - import_tasks: roles/aws/tasks/securitygroup.yaml
    - import_tasks: roles/aws/tasks/routetable.yaml
    - import_tasks: roles/aws/tasks/subnet.yaml
    - import_tasks: roles/aws/tasks/igw.yaml
    - import_tasks: roles/aws/tasks/vpc.yaml
    - import_tasks: roles/aws/tasks/configfiles.yaml
    - import_tasks: roles/aws/tasks/configfiles_cns.yaml
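As a usage sketch (not part of this commit), the teardown play above targets localhost and resolves vars/main.yaml and roles/aws/... relative to the playbook, so running it from the playbooks directory is an assumption that keeps those relative paths valid:

    # assumes AWS credentials are discoverable per getcreds.yaml above
    cd reference-architecture/3.9/playbooks
    ansible-playbook undeploy_aws.yaml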
39
reference-architecture/3.9/playbooks/vars/main.yaml
Normal file
@@ -0,0 +1,39 @@
|
||||
---
aws_cred_profile: "default"

# password for ssh key - ~/.ssh/{{ clusterid }}.{{ dns_domain }}
sshkey_password: 'abc123'

clusterid: "refarch"
dns_domain: "example.com"
aws_region: "us-east-1"

vpc_cidr: "172.16.0.0/16"

subnets_public_cidr:
  - 172.16.0.0/24
  - 172.16.1.0/24
  - 172.16.2.0/24

subnets_private_cidr:
  - 172.16.16.0/20
  - 172.16.32.0/20
  - 172.16.48.0/20

ec2_type_bastion: "t2.medium"

#ec2_count_master: 3
ec2_type_master: "m5.2xlarge"

#ec2_count_infra: 3
ec2_type_infra: "m5.2xlarge"

#ec2_count_node: 3
ec2_type_node: "m5.2xlarge"

#ec2_count_cns: 3
ec2_type_cns: "m5.2xlarge"

rhel_release: "rhel-7.5"

#ec2ami: ami-abc3231a
@@ -2,13 +2,16 @@
|
||||
|
||||
This repository contains a series of directories containing code used to deploy an OpenShift environment on different cloud providers. The code in this repository supplements the reference architecture guides for OpenShift 3. Different guides and documentation exist depending on the provider. Regardless of the provider, the environment deploys masters, infrastructure and application nodes. The code also deploys a Docker registry and scales the router to the number of infrastructure nodes.
|
||||
|
||||
**NOTE: Some repositories containing scripts and ansible playbooks are
|
||||
deprecated.**
|
||||
|
||||
For documentation, please see the following links
|
||||
|
||||
* VMWare - [Deploying and Managing OpenShift 3.9 on VMware vSphere](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_vmware_vsphere/)
|
||||
* OSP - [Deploying and Managing OpenShift 3.9 on Red Hat OpenStack Platform 10](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_red_hat_openstack_platform_10/)
|
||||
* Azure - [Deploying and Managing OpenShift 3.9 on Azure](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_azure/)
|
||||
* AWS - [Deploying and Managing OpenShift Container Platform 3.6 on Amazon Web Services](https://access.redhat.com/documentation/en-us/reference_architectures/2017/html-single/deploying_and_managing_openshift_container_platform_3.6_on_amazon_web_services/)
|
||||
* GCP - [Deploying Red Hat OpenShift Container Platform 3 on Google Cloud Platform](https://access.redhat.com/documentation/en-us/reference_architectures/2017/html-single/deploying_and_managing_openshift_container_platform_3_on_google_cloud_platform/)
|
||||
* VMWare - [Deploying and Managing OpenShift Container Platform 3.6 on VMware vSphere](https://access.redhat.com/documentation/en-us/reference_architectures/2017/html-single/deploying_a_red_hat_openshift_container_platform_3_on_vmware_vcenter_6/)
|
||||
* OSP - [Deploying and Managing Red Hat OpenShift Container Platform 3.6 on Red Hat OpenStack Platform 10](https://access.redhat.com/documentation/en-us/reference_architectures/2017/html-single/deploying_and_managing_red_hat_openshift_container_platform_3.6_on_red_hat_openstack_platform_10/)
|
||||
* Azure - [Deploying OpenShift Container Platform 3.6 on Microsoft Azure](https://access.redhat.com/documentation/en-us/reference_architectures/2017/html-single/deploying_red_hat_openshift_container_platform_3.6_on_microsoft_azure/)
|
||||
* RHV - [Deploying Red Hat OpenShift Container Platform 3.6 on Red Hat Virtualization 4](https://access.redhat.com/documentation/en-us/reference_architectures/2017/html-single/deploying_red_hat_openshift_container_platform_3.6_on_red_hat_virtualization_4/)
|
||||
|
||||
For a list of more reference architectures, see [OpenShift Container Platform reference architectures](https://access.redhat.com/documentation/en-us/reference_architectures/?category=openshift%2520container%2520platform)
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
|
||||
# DEPRECATED
|
||||
This code has been deprecated. Please check the directory https://github.com/openshift/openshift-ansible-contrib/tree/master/reference-architecture/3.9/playbooks and the new reference architecture document:
|
||||
https://access.redhat.com/documentation/en-us/reference_architectures/2018/html/deploying_and_managing_openshift_3.9_on_amazon_web_services/
|
||||
|
||||
# The Reference Architecture OpenShift on Amazon Web Services
|
||||
This repository contains the scripts used to deploy an OpenShift Container Platform or OpenShift Origin environment based on the Reference Architecture Guide for OCP 3.6 on Amazon Web Services.
|
||||
|
||||
|
||||
@@ -164,7 +164,7 @@ def launch_refarch_env(region=None,
|
||||
click.echo('\tcontainerized: %s' % containerized)
|
||||
click.echo('\tnode_type: %s' % node_type)
|
||||
click.echo('\texisting_stack: %s' % existing_stack)
|
||||
click.echo('\topenshit_sdn: %s' % openshift_sdn)
|
||||
click.echo('\topenshift_sdn: %s' % openshift_sdn)
|
||||
click.echo('\tSubnets, Security Groups, and IAM Roles will be gather from the CloudFormation')
|
||||
click.echo("")
|
||||
else:
|
||||
|
||||
@@ -1 +1 @@
|
||||
/usr/share/ansible/openshift-ansible/library/rpm_q.py
|
||||
/usr/share/ansible/openshift-ansible/roles/lib_utils/library/rpm_q.py
|
||||
@@ -1,5 +1,7 @@
|
||||
# Red Hat OpenShift Container Platform on Azure
|
||||
|
||||
## **NOTE: This repository contains deprecated scripts and ansible playbooks. Refer to the official documentation [Deploying and Managing OpenShift 3.9 on Azure](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_azure/)**
|
||||
|
||||
## Overview
|
||||
The reference architecture supports the creation of either a multi-node full HA
|
||||
production cluster or a single node designed for exploration of OpenShift on Azure.
|
||||
|
||||
88
reference-architecture/gcp/3.9/README.md
Normal file
@@ -0,0 +1,88 @@
## **NOTE: This repository contains unsupported scripts. Refer to the official documentation [Deploying and Managing OpenShift 3.9 on Google Cloud Platform](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_google_cloud_platform/)**

To simplify infrastructure creation and deletion, a couple of scripts have been
created that wrap the commands from the official documentation.

# Requirements
A proper variables file is required. See the [Environment configuration section in the official documentation](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_google_cloud_platform/#environment_configuration) for more information.

An example file [infrastructure.vars](infrastructure.vars) has been included as
a reference.

**NOTE:** A [bastion.vars](bastion.vars) example file is included as well to
help with bastion host tasks.

**IMPORTANT:** The image is not created as part of this process. It should be
created prior to running the
[create_infrastructure.sh](create_infrastructure.sh) script.

## [create_infrastructure.sh](create_infrastructure.sh)
This script creates all the required infrastructure in GCP as explained in the
official documentation, including CNS nodes.

Usage:
```
./create_infrastructure.sh <vars_file>
```

After the infrastructure has been created, configure your `~/.ssh/config` to
use the bastion host as a jumphost, as explained in the
[reference architecture](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_google_cloud_platform/#configuring_ssh_config_to_use_bastion_as_jumphost); a minimal example follows below.

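For orientation only, a minimal `~/.ssh/config` along these lines should work. The user name, key path and host pattern below are placeholders (GCE internal host names typically end in `.c.<project id>.internal`); adjust them to your environment and treat the reference architecture document as the authoritative example.

```
Host bastion
    HostName <bastion public IP>
    User myuser
    ForwardAgent yes
    StrictHostKeyChecking no
    IdentityFile ~/.ssh/mykey

Host *.c.myproject.internal
    ProxyCommand ssh -W %h:%p bastion
    User myuser
    StrictHostKeyChecking no
    IdentityFile ~/.ssh/mykey
```
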
Copy the [bastion.sh](bastion.sh) script and your bastion.vars file to the
bastion host:

```
scp bastion.sh my.vars user@BASTIONIP:
```

Connect to the bastion host and run the bastion.sh script:

```
ssh user@BASTIONIP
./bastion.sh ./my.vars
```

**NOTE:** The last step reboots all the nodes, so the script ends with what
looks like a failure; this is expected.

After it finishes, run the installation prerequisites and the installation
itself from the bastion host (using tmux is optional but recommended):

```
tmux
ansible-playbook -i inventory \
  /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
ansible-playbook -i inventory \
  /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml
```

## [delete_infrastructure.sh](delete_infrastructure.sh)
This script removes all the infrastructure created in GCP as explained in the
official documentation, including CNS nodes.

USE IT WITH CAUTION: SETTING A VARIABLE INCORRECTLY CAN HAVE DISASTROUS
CONSEQUENCES.

First, remove the PVs (as cluster-admin):

```
for i in $(oc get pv -o name); do
  oc delete $i
done
```

Unsubscribe the instances from the bastion host:

```
sudo subscription-manager remove --all
sudo subscription-manager unregister

ansible all -b -i inventory -m shell -a "subscription-manager remove --all"
ansible all -b -i inventory -m shell -a "subscription-manager unregister"
```

Finally, remove all the infrastructure objects (from your workstation):

```
./delete_infrastructure.sh <vars_file>
```
250
reference-architecture/gcp/3.9/bastion.sh
Executable file
@@ -0,0 +1,250 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
warnuser(){
|
||||
cat << EOF
|
||||
###########
|
||||
# WARNING #
|
||||
###########
|
||||
This script is distributed WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND
|
||||
Refer to the official documentation
|
||||
https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_google_cloud_platform/
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
die(){
|
||||
echo "$1"
|
||||
exit $2
|
||||
}
|
||||
|
||||
usage(){
|
||||
warnuser
|
||||
echo "$0 <vars_file>"
|
||||
echo " vars_file The file containing all the required variables"
|
||||
echo "Examples:"
|
||||
echo " $0 myvars"
|
||||
}
|
||||
|
||||
if [[ ( $@ == "--help") || $@ == "-h" ]]
|
||||
then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ $# -lt 1 ]]
|
||||
then
|
||||
usage
|
||||
die "vars_file not provided" 2
|
||||
fi
|
||||
|
||||
warnuser
|
||||
|
||||
VARSFILE=${1}
|
||||
|
||||
if [[ ! -f ${VARSFILE} ]]
|
||||
then
|
||||
usage
|
||||
die "vars_file not found" 2
|
||||
fi
|
||||
|
||||
read -p "Are you sure? " -n 1 -r
|
||||
echo # (optional) move to a new line
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]
|
||||
then
|
||||
die "User cancel" 4
|
||||
fi
|
||||
|
||||
source ${VARSFILE}
|
||||
|
||||
if [ -z $RHUSER ]; then
|
||||
sudo subscription-manager register --activationkey=${AK} --org=${ORGID}
|
||||
else
|
||||
sudo subscription-manager register --username=${RHUSER} --password=${RHPASS}
|
||||
fi
|
||||
|
||||
sudo subscription-manager attach --pool=${POOLID}
|
||||
sudo subscription-manager repos --disable="*" \
|
||||
--enable="rhel-7-server-rpms" \
|
||||
--enable="rhel-7-server-extras-rpms" \
|
||||
--enable="rhel-7-server-ose-${OCPVER}-rpms" \
|
||||
--enable="rhel-7-fast-datapath-rpms" \
|
||||
--enable="rhel-7-server-ansible-2.4-rpms"
|
||||
|
||||
sudo yum install atomic-openshift-utils tmux -y
|
||||
|
||||
sudo yum update -y
|
||||
|
||||
cat <<'EOF' > ./ansible.cfg
|
||||
[defaults]
|
||||
forks = 20
|
||||
host_key_checking = False
|
||||
remote_user = MYUSER
|
||||
roles_path = roles/
|
||||
gathering = smart
|
||||
fact_caching = jsonfile
|
||||
fact_caching_connection = $HOME/ansible/facts
|
||||
fact_caching_timeout = 600
|
||||
log_path = $HOME/ansible.log
|
||||
nocows = 1
|
||||
callback_whitelist = profile_tasks
|
||||
|
||||
[ssh_connection]
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=600s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=false -o ForwardAgent=yes
|
||||
control_path = %(directory)s/%%h-%%r
|
||||
pipelining = True
|
||||
timeout = 10
|
||||
|
||||
[persistent_connection]
|
||||
connect_timeout = 30
|
||||
connect_retries = 30
|
||||
connect_interval = 1
|
||||
EOF
|
||||
|
||||
sed -i -e "s/MYUSER/${MYUSER}/g" ./ansible.cfg
|
||||
|
||||
cat <<'EOF' > ./inventory
|
||||
[OSEv3:children]
|
||||
masters
|
||||
etcd
|
||||
nodes
|
||||
glusterfs
|
||||
|
||||
[OSEv3:vars]
|
||||
ansible_become=true
|
||||
openshift_release=vOCPVER
|
||||
os_firewall_use_firewalld=True
|
||||
openshift_clock_enabled=true
|
||||
|
||||
openshift_cloudprovider_kind=gce
|
||||
openshift_gcp_project=PROJECTID
|
||||
openshift_gcp_prefix=CLUSTERID
|
||||
# If deploying single zone cluster set to "False"
|
||||
openshift_gcp_multizone="True"
|
||||
openshift_gcp_network_name=CLUSTERID-net
|
||||
|
||||
openshift_master_api_port=443
|
||||
openshift_master_console_port=443
|
||||
|
||||
openshift_node_local_quota_per_fsgroup=512Mi
|
||||
|
||||
openshift_hosted_registry_replicas=1
|
||||
openshift_hosted_registry_storage_kind=object
|
||||
openshift_hosted_registry_storage_provider=gcs
|
||||
openshift_hosted_registry_storage_gcs_bucket=CLUSTERID-registry
|
||||
|
||||
openshift_master_cluster_method=native
|
||||
openshift_master_cluster_hostname=CLUSTERID-ocp.DOMAIN
|
||||
openshift_master_cluster_public_hostname=CLUSTERID-ocp.DOMAIN
|
||||
openshift_master_default_subdomain=CLUSTERID-apps.DOMAIN
|
||||
|
||||
os_sdn_network_plugin_name=redhat/openshift-ovs-networkpolicy
|
||||
|
||||
deployment_type=openshift-enterprise
|
||||
|
||||
# Required per https://access.redhat.com/solutions/3480921
|
||||
oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
|
||||
openshift_examples_modify_imagestreams=true
|
||||
openshift_storage_glusterfs_image=registry.access.redhat.com/rhgs3/rhgs-server-rhel7
|
||||
openshift_storage_glusterfs_block_image=registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7
|
||||
openshift_storage_glusterfs_s3_image=registry.access.redhat.com/rhgs3/rhgs-s3-server-rhel7
|
||||
openshift_storage_glusterfs_heketi_image=registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7
|
||||
|
||||
# Service catalog
|
||||
openshift_hosted_etcd_storage_kind=dynamic
|
||||
openshift_hosted_etcd_storage_volume_name=etcd-vol
|
||||
openshift_hosted_etcd_storage_access_modes=["ReadWriteOnce"]
|
||||
openshift_hosted_etcd_storage_volume_size=SC_STORAGE
|
||||
openshift_hosted_etcd_storage_labels={'storage': 'etcd'}
|
||||
|
||||
# Metrics
|
||||
openshift_metrics_install_metrics=true
|
||||
openshift_metrics_cassandra_storage_type=dynamic
|
||||
openshift_metrics_storage_volume_size=METRICS_STORAGE
|
||||
openshift_metrics_cassandra_nodeselector={"region":"infra"}
|
||||
openshift_metrics_hawkular_nodeselector={"region":"infra"}
|
||||
openshift_metrics_heapster_nodeselector={"region":"infra"}
|
||||
|
||||
# Aggregated logging
|
||||
openshift_logging_install_logging=true
|
||||
openshift_logging_es_pvc_dynamic=true
|
||||
openshift_logging_es_pvc_size=LOGGING_STORAGE
|
||||
openshift_logging_es_cluster_size=3
|
||||
openshift_logging_es_nodeselector={"region":"infra"}
|
||||
openshift_logging_kibana_nodeselector={"region":"infra"}
|
||||
openshift_logging_curator_nodeselector={"region":"infra"}
|
||||
openshift_logging_es_number_of_replicas=1
|
||||
|
||||
openshift_master_identity_providers=[{'name': 'htpasswd_auth','login': 'true','challenge': 'true','kind': 'HTPasswdPasswordIdentityProvider','filename': '/etc/origin/master/htpasswd'}]
|
||||
openshift_master_htpasswd_users={'admin': 'HTPASSWD'}
|
||||
|
||||
openshift_hosted_prometheus_deploy=true
|
||||
openshift_prometheus_node_selector={"region":"infra"}
|
||||
openshift_prometheus_storage_type=pvc
|
||||
|
||||
[masters]
|
||||
CLUSTERID-master-0
|
||||
CLUSTERID-master-1
|
||||
CLUSTERID-master-2
|
||||
|
||||
[etcd]
|
||||
CLUSTERID-master-0
|
||||
CLUSTERID-master-1
|
||||
CLUSTERID-master-2
|
||||
|
||||
[nodes]
|
||||
CLUSTERID-master-0 openshift_node_labels="{'region': 'master'}"
|
||||
CLUSTERID-master-1 openshift_node_labels="{'region': 'master'}"
|
||||
CLUSTERID-master-2 openshift_node_labels="{'region': 'master'}"
|
||||
CLUSTERID-infra-0 openshift_node_labels="{'region': 'infra', 'node-role.kubernetes.io/infra': 'true'}"
|
||||
CLUSTERID-infra-1 openshift_node_labels="{'region': 'infra', 'node-role.kubernetes.io/infra': 'true'}"
|
||||
CLUSTERID-infra-2 openshift_node_labels="{'region': 'infra', 'node-role.kubernetes.io/infra': 'true'}"
|
||||
CLUSTERID-app-0 openshift_node_labels="{'region': 'apps'}"
|
||||
CLUSTERID-app-1 openshift_node_labels="{'region': 'apps'}"
|
||||
CLUSTERID-app-2 openshift_node_labels="{'region': 'apps'}"
|
||||
CLUSTERID-cns-0 openshift_node_labels="{'region': 'cns', 'node-role.kubernetes.io/cns': 'true'}"
|
||||
CLUSTERID-cns-1 openshift_node_labels="{'region': 'cns', 'node-role.kubernetes.io/cns': 'true'}"
|
||||
CLUSTERID-cns-2 openshift_node_labels="{'region': 'cns', 'node-role.kubernetes.io/cns': 'true'}"
|
||||
|
||||
[glusterfs]
|
||||
CLUSTERID-cns-0 glusterfs_devices='[ "/dev/disk/by-id/google-CLUSTERID-cns-0-gluster" ]' openshift_node_local_quota_per_fsgroup=""
|
||||
CLUSTERID-cns-1 glusterfs_devices='[ "/dev/disk/by-id/google-CLUSTERID-cns-1-gluster" ]' openshift_node_local_quota_per_fsgroup=""
|
||||
CLUSTERID-cns-2 glusterfs_devices='[ "/dev/disk/by-id/google-CLUSTERID-cns-2-gluster" ]' openshift_node_local_quota_per_fsgroup=""
|
||||
EOF
|
||||
|
||||
sed -i -e "s/MYUSER/${MYUSER}/g" \
|
||||
-e "s/OCPVER/${OCPVER}/g" \
|
||||
-e "s/CLUSTERID/${CLUSTERID}/g" \
|
||||
-e "s/PROJECTID/${PROJECTID}/g" \
|
||||
-e "s/DOMAIN/${DOMAIN}/g" \
|
||||
-e "s/HTPASSWD/${HTPASSWD}/g" \
|
||||
-e "s/LOGGING_STORAGE/${LOGGING_STORAGE}/g" \
|
||||
-e "s/METRICS_STORAGE/${METRICS_STORAGE}/g" \
|
||||
-e "s/SC_STORAGE/${SC_STORAGE}/g" \
|
||||
./inventory
|
||||
|
||||
if [ -z $RHUSER ]; then
|
||||
ansible nodes -i inventory -b -m redhat_subscription -a \
|
||||
"state=present activationkey=${AK} org_id=${ORGID} pool_ids=${POOLID}"
|
||||
else
|
||||
ansible nodes -i inventory -b -m redhat_subscription -a \
|
||||
"state=present user=${RHUSER} password=${RHPASS} pool_ids=${POOLID}"
|
||||
fi
|
||||
|
||||
ansible nodes -i inventory -b -m shell -a \
|
||||
"subscription-manager repos --disable=\* \
|
||||
--enable=rhel-7-server-rpms \
|
||||
--enable=rhel-7-server-extras-rpms \
|
||||
--enable=rhel-7-server-ose-${OCPVER}-rpms \
|
||||
--enable=rhel-7-fast-datapath-rpms \
|
||||
--enable=rhel-7-server-ansible-2.4-rpms"
|
||||
|
||||
ansible '*-infra-*' -i inventory -b -m firewalld -a \
|
||||
"port=1936/tcp permanent=true state=enabled"
|
||||
|
||||
ansible nodes -i inventory -b -m firewalld -a \
|
||||
"port=10256/tcp permanent=true state=enabled"
|
||||
|
||||
ansible all -i inventory -b -m yum -a "name=* state=latest"
|
||||
ansible all -i inventory -b -m command -a "reboot"
|
||||
20
reference-architecture/gcp/3.9/bastion.vars
Normal file
@@ -0,0 +1,20 @@
|
||||
export OCPVER=3.9
|
||||
# Configure AK & ORGID
|
||||
# or RHUSER & RHPASS
|
||||
export AK=myak
|
||||
export ORGID=6969
|
||||
# RHUSER=
|
||||
# RHPASS=
|
||||
export POOLID=xxx
|
||||
export MYUSER=cloud-user
|
||||
export PROJECTID=refarch
|
||||
export CLUSTERID=refarch
|
||||
export DOMAIN=example.com
|
||||
export SC_STORAGE=1G
|
||||
export METRICS_STORAGE=20Gi
|
||||
export LOGGING_STORAGE=100Gi
|
||||
# 'admin' user password. Generate it with:
|
||||
# htpasswd -nb admin password | awk -F: '{ print $2 }'
|
||||
# Beware with the single quotation marks if the variable contains dollar sign
|
||||
# In this case 'password' is used
|
||||
export HTPASSWD='$apr1$wa4YaR7W$jYiUbDt4WWAuTctQbGXAU0'
|
||||
552
reference-architecture/gcp/3.9/create_infrastructure.sh
Executable file
@@ -0,0 +1,552 @@
|
||||
#!/bin/bash
|
||||
set -eo pipefail
|
||||
|
||||
warnuser(){
|
||||
cat << EOF
|
||||
###########
|
||||
# WARNING #
|
||||
###########
|
||||
This script is distributed WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND
|
||||
Refer to the official documentation
|
||||
https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_google_cloud_platform/
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
die(){
|
||||
echo "$1"
|
||||
exit $2
|
||||
}
|
||||
|
||||
usage(){
|
||||
warnuser
|
||||
echo "$0 <vars_file>"
|
||||
echo " vars_file The file containing all the required variables"
|
||||
echo "Examples:"
|
||||
echo " $0 myvars"
|
||||
}
|
||||
|
||||
if [[ ( $@ == "--help") || $@ == "-h" ]]
|
||||
then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ $# -lt 1 ]]
|
||||
then
|
||||
usage
|
||||
die "vars_file not provided" 2
|
||||
fi
|
||||
|
||||
for i in gcloud gsutil
|
||||
do
|
||||
command -v $i >/dev/null 2>&1 || die "$i required but not found" 3
|
||||
done
|
||||
|
||||
warnuser
|
||||
|
||||
VARSFILE=${1}
|
||||
|
||||
if [[ ! -f ${VARSFILE} ]]
|
||||
then
|
||||
usage
|
||||
die "vars_file not found" 2
|
||||
fi
|
||||
|
||||
read -p "Are you sure? " -n 1 -r
|
||||
echo # (optional) move to a new line
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]
|
||||
then
|
||||
die "User cancel" 4
|
||||
fi
|
||||
|
||||
export CLOUDSDK_CORE_DISABLE_PROMPTS=1
|
||||
|
||||
source ${VARSFILE}
|
||||
|
||||
# Config
|
||||
gcloud config set project ${PROJECTID}
|
||||
gcloud config set compute/region ${REGION}
|
||||
gcloud config set compute/zone ${DEFAULTZONE}
|
||||
|
||||
# Network
|
||||
gcloud compute networks create ${CLUSTERID_NETWORK} --subnet-mode custom
|
||||
|
||||
# Subnet
|
||||
gcloud compute networks subnets create ${CLUSTERID_SUBNET} \
|
||||
--network ${CLUSTERID_NETWORK} \
|
||||
--range ${CLUSTERID_SUBNET_CIDR}
|
||||
|
||||
# External to bastion
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-external-to-bastion \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=tcp:22,icmp \
|
||||
--source-ranges=0.0.0.0/0 --target-tags=${CLUSTERID}-bastion
|
||||
# Bastion to all hosts
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-bastion-to-any \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=all \
|
||||
--source-tags=${CLUSTERID}-bastion --target-tags=${CLUSTERID}-node
|
||||
|
||||
# Nodes to master
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-node-to-master \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=udp:8053,tcp:8053 \
|
||||
--source-tags=${CLUSTERID}-node --target-tags=${CLUSTERID}-master
|
||||
|
||||
# Master to node
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-master-to-node \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=tcp:10250 \
|
||||
--source-tags=${CLUSTERID}-master --target-tags=${CLUSTERID}-node
|
||||
|
||||
# Master to master
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-master-to-master \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=tcp:2379,tcp:2380 \
|
||||
--source-tags=${CLUSTERID}-master --target-tags=${CLUSTERID}-master
|
||||
|
||||
# Any to master
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-any-to-masters \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=tcp:443 \
|
||||
--source-ranges=${CLUSTERID_SUBNET_CIDR} --target-tags=${CLUSTERID}-master
|
||||
|
||||
# Infra node to infra node
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-infra-to-infra \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=tcp:9200,tcp:9300 \
|
||||
--source-tags=${CLUSTERID}-infra --target-tags=${CLUSTERID}-infra
|
||||
|
||||
# Routers
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-any-to-routers \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--source-ranges 0.0.0.0/0 \
|
||||
--target-tags ${CLUSTERID}-infra \
|
||||
--allow tcp:443,tcp:80
|
||||
|
||||
# Node to node SDN
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-node-to-node \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=udp:4789 \
|
||||
--source-tags=${CLUSTERID}-node --target-tags=${CLUSTERID}-node
|
||||
|
||||
# Infra to node kubelet
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-infra-to-node \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=tcp:10250 \
|
||||
--source-tags=${CLUSTERID}-infra --target-tags=${CLUSTERID}-node
|
||||
|
||||
# CNS to CNS node
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-cns-to-cns \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=tcp:2222 \
|
||||
--source-tags=${CLUSTERID}-cns --target-tags=${CLUSTERID}-cns
|
||||
|
||||
# Node to CNS node (client)
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-node-to-cns \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW \
|
||||
--rules=tcp:111,udp:111,tcp:3260,tcp:24007-24010,tcp:49152-49664 \
|
||||
--source-tags=${CLUSTERID}-node --target-tags=${CLUSTERID}-cns
|
||||
|
||||
# Masters load balancer
|
||||
gcloud compute addresses create ${CLUSTERID}-master-lb \
|
||||
--ip-version=IPV4 \
|
||||
--global
|
||||
|
||||
# Applications load balancer
|
||||
gcloud compute addresses create ${CLUSTERID}-apps-lb \
|
||||
--region ${REGION}
|
||||
|
||||
# Bastion host
|
||||
gcloud compute addresses create ${CLUSTERID}-bastion \
|
||||
--region ${REGION}
|
||||
|
||||
# Masters load balancer entry
|
||||
export LBIP=$(gcloud compute addresses list \
|
||||
--filter="name:${CLUSTERID}-master-lb" --format="value(address)")
|
||||
|
||||
gcloud dns record-sets transaction start --zone=${DNSZONE}
|
||||
|
||||
gcloud dns record-sets transaction add \
|
||||
${LBIP} --name=${CLUSTERID}-ocp.${DOMAIN} --ttl=${TTL} --type=A \
|
||||
--zone=${DNSZONE}
|
||||
gcloud dns record-sets transaction execute --zone=${DNSZONE}
|
||||
|
||||
# Applications load balancer entry
|
||||
export APPSLBIP=$(gcloud compute addresses list \
|
||||
--filter="name:${CLUSTERID}-apps-lb" --format="value(address)")
|
||||
|
||||
gcloud dns record-sets transaction start --zone=${DNSZONE}
|
||||
|
||||
gcloud dns record-sets transaction add \
|
||||
${APPSLBIP} --name=\*.${CLUSTERID}-apps.${DOMAIN} --ttl=${TTL} --type=A \
|
||||
--zone=${DNSZONE}
|
||||
|
||||
gcloud dns record-sets transaction execute --zone=${DNSZONE}
|
||||
|
||||
# Bastion host
|
||||
export BASTIONIP=$(gcloud compute addresses list \
|
||||
--filter="name:${CLUSTERID}-bastion" --format="value(address)")
|
||||
|
||||
gcloud dns record-sets transaction start --zone=${DNSZONE}
|
||||
|
||||
gcloud dns record-sets transaction add \
|
||||
${BASTIONIP} --name=${CLUSTERID}-bastion.${DOMAIN} --ttl=${TTL} --type=A \
|
||||
--zone=${DNSZONE}
|
||||
|
||||
gcloud dns record-sets transaction execute --zone=${DNSZONE}
|
||||
|
||||
export BASTIONIP=$(gcloud compute addresses list \
|
||||
--filter="name:${CLUSTERID}-bastion" --format="value(address)")
|
||||
|
||||
gcloud compute instances create ${CLUSTERID}-bastion \
|
||||
--machine-type=${BASTIONSIZE} \
|
||||
--subnet=${CLUSTERID_SUBNET} \
|
||||
--address=${BASTIONIP} \
|
||||
--maintenance-policy=MIGRATE \
|
||||
--scopes=https://www.googleapis.com/auth/cloud.useraccounts.readonly,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.read_write,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/servicecontrol \
|
||||
--tags=${CLUSTERID}-bastion \
|
||||
--metadata "ocp-cluster=${CLUSTERID},${CLUSTERID}-type=bastion" \
|
||||
--image=${RHELIMAGE} --image-project=${IMAGEPROJECT} \
|
||||
--boot-disk-size=${BASTIONDISKSIZE} --boot-disk-type=pd-ssd \
|
||||
--boot-disk-device-name=${CLUSTERID}-bastion \
|
||||
--zone=${DEFAULTZONE}
|
||||
|
||||
cat <<'EOF' > ./master.sh
|
||||
#!/bin/bash
|
||||
LOCALVOLDEVICE=$(readlink -f /dev/disk/by-id/google-*local*)
|
||||
ETCDDEVICE=$(readlink -f /dev/disk/by-id/google-*etcd*)
|
||||
CONTAINERSDEVICE=$(readlink -f /dev/disk/by-id/google-*containers*)
|
||||
LOCALDIR="/var/lib/origin/openshift.local.volumes"
|
||||
ETCDDIR="/var/lib/etcd"
|
||||
CONTAINERSDIR="/var/lib/docker"
|
||||
|
||||
for device in ${LOCALVOLDEVICE} ${ETCDDEVICE} ${CONTAINERSDEVICE}
|
||||
do
|
||||
mkfs.xfs ${device}
|
||||
done
|
||||
|
||||
for dir in ${LOCALDIR} ${ETCDDIR} ${CONTAINERSDIR}
|
||||
do
|
||||
mkdir -p ${dir}
|
||||
restorecon -R ${dir}
|
||||
done
|
||||
|
||||
echo UUID=$(blkid -s UUID -o value ${LOCALVOLDEVICE}) ${LOCALDIR} xfs defaults,discard,gquota 0 2 >> /etc/fstab
|
||||
echo UUID=$(blkid -s UUID -o value ${ETCDDEVICE}) ${ETCDDIR} xfs defaults,discard 0 2 >> /etc/fstab
|
||||
echo UUID=$(blkid -s UUID -o value ${CONTAINERSDEVICE}) ${CONTAINERSDIR} xfs defaults,discard 0 2 >> /etc/fstab
|
||||
|
||||
mount -a
|
||||
EOF
|
||||
|
||||
eval "$MYZONES_LIST"
|
||||
|
||||
for i in $(seq 0 $((${MASTER_NODE_COUNT}-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute disks create ${CLUSTERID}-master-${i}-etcd \
|
||||
--type=pd-ssd --size=${ETCDSIZE} --zone=${zone[$i]}
|
||||
gcloud compute disks create ${CLUSTERID}-master-${i}-containers \
|
||||
--type=pd-ssd --size=${MASTERCONTAINERSSIZE} --zone=${zone[$i]}
|
||||
gcloud compute disks create ${CLUSTERID}-master-${i}-local \
|
||||
--type=pd-ssd --size=${MASTERLOCALSIZE} --zone=${zone[$i]}
|
||||
done
|
||||
|
||||
# Master instances multizone and single zone support
|
||||
for i in $(seq 0 $((${MASTER_NODE_COUNT}-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances create ${CLUSTERID}-master-${i} \
|
||||
--machine-type=${MASTERSIZE} \
|
||||
--subnet=${CLUSTERID_SUBNET} \
|
||||
--address="" --no-public-ptr \
|
||||
--maintenance-policy=MIGRATE \
|
||||
--scopes=https://www.googleapis.com/auth/cloud.useraccounts.readonly,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.read_only,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/servicecontrol \
|
||||
--tags=${CLUSTERID}-master,${CLUSTERID}-node \
|
||||
--metadata "ocp-cluster=${CLUSTERID},${CLUSTERID}-type=master" \
|
||||
--image=${RHELIMAGE} --image-project=${IMAGEPROJECT} \
|
||||
--boot-disk-size=${MASTERDISKSIZE} --boot-disk-type=pd-ssd \
|
||||
--boot-disk-device-name=${CLUSTERID}-master-${i} \
|
||||
--disk=name=${CLUSTERID}-master-${i}-etcd,device-name=${CLUSTERID}-master-${i}-etcd,mode=rw,boot=no \
|
||||
--disk=name=${CLUSTERID}-master-${i}-containers,device-name=${CLUSTERID}-master-${i}-containers,mode=rw,boot=no \
|
||||
--disk=name=${CLUSTERID}-master-${i}-local,device-name=${CLUSTERID}-master-${i}-local,mode=rw,boot=no \
|
||||
--metadata-from-file startup-script=./master.sh \
|
||||
--zone=${zone[$i]}
|
||||
done
|
||||
|
||||
cat <<'EOF' > ./node.sh
|
||||
#!/bin/bash
|
||||
LOCALVOLDEVICE=$(readlink -f /dev/disk/by-id/google-*local*)
|
||||
CONTAINERSDEVICE=$(readlink -f /dev/disk/by-id/google-*containers*)
|
||||
LOCALDIR="/var/lib/origin/openshift.local.volumes"
|
||||
CONTAINERSDIR="/var/lib/docker"
|
||||
|
||||
for device in ${LOCALVOLDEVICE} ${CONTAINERSDEVICE}
|
||||
do
|
||||
mkfs.xfs ${device}
|
||||
done
|
||||
|
||||
for dir in ${LOCALDIR} ${CONTAINERSDIR}
|
||||
do
|
||||
mkdir -p ${dir}
|
||||
restorecon -R ${dir}
|
||||
done
|
||||
|
||||
echo UUID=$(blkid -s UUID -o value ${LOCALVOLDEVICE}) ${LOCALDIR} xfs defaults,discard,gquota 0 2 >> /etc/fstab
|
||||
echo UUID=$(blkid -s UUID -o value ${CONTAINERSDEVICE}) ${CONTAINERSDIR} xfs defaults,discard 0 2 >> /etc/fstab
|
||||
|
||||
mount -a
|
||||
EOF
|
||||
|
||||
# Disks multizone and single zone support
|
||||
eval "$MYZONES_LIST"
|
||||
|
||||
for i in $(seq 0 $(($INFRA_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute disks create ${CLUSTERID}-infra-${i}-containers \
|
||||
--type=pd-ssd --size=${INFRACONTAINERSSIZE} --zone=${zone[$i]}
|
||||
gcloud compute disks create ${CLUSTERID}-infra-${i}-local \
|
||||
--type=pd-ssd --size=${INFRALOCALSIZE} --zone=${zone[$i]}
|
||||
done
|
||||
|
||||
# Infrastructure instances multizone and single zone support
|
||||
for i in $(seq 0 $(($INFRA_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances create ${CLUSTERID}-infra-${i} \
|
||||
--machine-type=${INFRASIZE} \
|
||||
--subnet=${CLUSTERID_SUBNET} \
|
||||
--address="" --no-public-ptr \
|
||||
--maintenance-policy=MIGRATE \
|
||||
--scopes=https://www.googleapis.com/auth/cloud.useraccounts.readonly,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.read_write,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/servicecontrol \
|
||||
--tags=${CLUSTERID}-infra,${CLUSTERID}-node,${CLUSTERID}ocp \
|
||||
--metadata "ocp-cluster=${CLUSTERID},${CLUSTERID}-type=infra" \
|
||||
--image=${RHELIMAGE} --image-project=${IMAGEPROJECT} \
|
||||
--boot-disk-size=${INFRADISKSIZE} --boot-disk-type=pd-ssd \
|
||||
--boot-disk-device-name=${CLUSTERID}-infra-${i} \
|
||||
--disk=name=${CLUSTERID}-infra-${i}-containers,device-name=${CLUSTERID}-infra-${i}-containers,mode=rw,boot=no \
|
||||
--disk=name=${CLUSTERID}-infra-${i}-local,device-name=${CLUSTERID}-infra-${i}-local,mode=rw,boot=no \
|
||||
--metadata-from-file startup-script=./node.sh \
|
||||
--zone=${zone[$i]}
|
||||
done
|
||||
|
||||
# Disks multizone and single zone support
|
||||
eval "$MYZONES_LIST"
|
||||
|
||||
for i in $(seq 0 $(($APP_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute disks create ${CLUSTERID}-app-${i}-containers \
|
||||
--type=pd-ssd --size=${APPCONTAINERSSIZE} --zone=${zone[$i]}
|
||||
gcloud compute disks create ${CLUSTERID}-app-${i}-local \
|
||||
--type=pd-ssd --size=${APPLOCALSIZE} --zone=${zone[$i]}
|
||||
done
|
||||
|
||||
# Application instances multizone and single zone support
|
||||
for i in $(seq 0 $(($APP_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances create ${CLUSTERID}-app-${i} \
|
||||
--machine-type=${APPSIZE} \
|
||||
--subnet=${CLUSTERID_SUBNET} \
|
||||
--address="" --no-public-ptr \
|
||||
--maintenance-policy=MIGRATE \
|
||||
--scopes=https://www.googleapis.com/auth/cloud.useraccounts.readonly,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.read_only,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/servicecontrol \
|
||||
--tags=${CLUSTERID}-node,${CLUSTERID}ocp \
|
||||
--metadata "ocp-cluster=${CLUSTERID},${CLUSTERID}-type=app" \
|
||||
--image=${RHELIMAGE} --image-project=${IMAGEPROJECT} \
|
||||
--boot-disk-size=${INFRADISKSIZE} --boot-disk-type=pd-ssd \
|
||||
--boot-disk-device-name=${CLUSTERID}-app-${i} \
|
||||
--disk=name=${CLUSTERID}-app-${i}-containers,device-name=${CLUSTERID}-app-${i}-containers,mode=rw,boot=no \
|
||||
--disk=name=${CLUSTERID}-app-${i}-local,device-name=${CLUSTERID}-app-${i}-local,mode=rw,boot=no \
|
||||
--metadata-from-file startup-script=./node.sh \
|
||||
--zone=${zone[$i]}
|
||||
done
|
||||
|
||||
# Health check
|
||||
gcloud compute health-checks create https ${CLUSTERID}-master-lb-healthcheck \
|
||||
--port 443 --request-path "/healthz" --check-interval=10s --timeout=10s \
|
||||
--healthy-threshold=3 --unhealthy-threshold=3
|
||||
|
||||
# Create backend and set client ip affinity to avoid websocket timeout
|
||||
gcloud compute backend-services create ${CLUSTERID}-master-lb-backend \
|
||||
--global \
|
||||
--protocol TCP \
|
||||
--session-affinity CLIENT_IP \
|
||||
--health-checks ${CLUSTERID}-master-lb-healthcheck \
|
||||
--port-name ocp-api
|
||||
|
||||
eval "$MYZONES_LIST"
|
||||
|
||||
# Multizone and single zone support for instance groups
|
||||
for i in $(seq 0 $((${#ZONES[@]}-1))); do
|
||||
ZONE=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instance-groups unmanaged create ${CLUSTERID}-masters-${ZONE} \
|
||||
--zone=${ZONE}
|
||||
gcloud compute instance-groups unmanaged set-named-ports \
|
||||
${CLUSTERID}-masters-${ZONE} --named-ports=ocp-api:443 --zone=${ZONE}
|
||||
gcloud compute instance-groups unmanaged add-instances \
|
||||
${CLUSTERID}-masters-${ZONE} --instances=${CLUSTERID}-master-${i} \
|
||||
--zone=${ZONE}
|
||||
# Instances are added to the backend service
|
||||
gcloud compute backend-services add-backend ${CLUSTERID}-master-lb-backend \
|
||||
--global \
|
||||
--instance-group ${CLUSTERID}-masters-${ZONE} \
|
||||
--instance-group-zone ${ZONE}
|
||||
done
|
||||
|
||||
# Do not set any proxy header to be transparent
|
||||
gcloud compute target-tcp-proxies create ${CLUSTERID}-master-lb-target-proxy \
|
||||
--backend-service ${CLUSTERID}-master-lb-backend \
|
||||
--proxy-header NONE
|
||||
|
||||
export LBIP=$(gcloud compute addresses list \
|
||||
--filter="name:${CLUSTERID}-master-lb" --format="value(address)")
|
||||
|
||||
# Forward only 443/tcp port
|
||||
gcloud compute forwarding-rules create \
|
||||
${CLUSTERID}-master-lb-forwarding-rule \
|
||||
--global \
|
||||
--target-tcp-proxy ${CLUSTERID}-master-lb-target-proxy \
|
||||
--address ${LBIP} \
|
||||
--ports 443
|
||||
|
||||
# Allow health checks from Google health check IPs
|
||||
gcloud compute firewall-rules create ${CLUSTERID}-healthcheck-to-lb \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--source-ranges 130.211.0.0/22,35.191.0.0/16 \
|
||||
--target-tags ${CLUSTERID}-master \
|
||||
--allow tcp:443
|
||||
|
||||
# Health check
|
||||
gcloud compute http-health-checks create ${CLUSTERID}-infra-lb-healthcheck \
|
||||
--port 1936 --request-path "/healthz" --check-interval=10s --timeout=10s \
|
||||
--healthy-threshold=3 --unhealthy-threshold=3
|
||||
|
||||
# Target Pool
|
||||
gcloud compute target-pools create ${CLUSTERID}-infra \
|
||||
--http-health-check ${CLUSTERID}-infra-lb-healthcheck
|
||||
|
||||
for i in $(seq 0 $(($INFRA_NODE_COUNT-1))); do
|
||||
gcloud compute target-pools add-instances ${CLUSTERID}-infra \
|
||||
--instances=${CLUSTERID}-infra-${i}
|
||||
done
|
||||
|
||||
# Forwarding rules and firewall rules
|
||||
export APPSLBIP=$(gcloud compute addresses list \
|
||||
--filter="name:${CLUSTERID}-apps-lb" --format="value(address)")
|
||||
|
||||
gcloud compute forwarding-rules create ${CLUSTERID}-infra-http \
|
||||
--ports 80 \
|
||||
--address ${APPSLBIP} \
|
||||
--region ${REGION} \
|
||||
--target-pool ${CLUSTERID}-infra
|
||||
|
||||
gcloud compute forwarding-rules create ${CLUSTERID}-infra-https \
|
||||
--ports 443 \
|
||||
--address ${APPSLBIP} \
|
||||
--region ${REGION} \
|
||||
--target-pool ${CLUSTERID}-infra
|
||||
|
||||
# Bucket to host registry
|
||||
gsutil mb -l ${REGION} gs://${CLUSTERID}-registry
|
||||
|
||||
cat <<EOF > labels.json
|
||||
{
|
||||
"ocp-cluster": "${CLUSTERID}"
|
||||
}
|
||||
EOF
|
||||
|
||||
gsutil label set labels.json gs://${CLUSTERID}-registry
|
||||
|
||||
rm -f labels.json
|
||||
|
||||
cat <<'EOF' > ./cns.sh
|
||||
#!/bin/bash
|
||||
CONTAINERSDEVICE=$(readlink -f /dev/disk/by-id/google-*containers*)
|
||||
CONTAINERSDIR="/var/lib/docker"
|
||||
|
||||
mkfs.xfs ${CONTAINERSDEVICE}
|
||||
mkdir -p ${CONTAINERSDIR}
|
||||
restorecon -R ${CONTAINERSDIR}
|
||||
|
||||
echo UUID=$(blkid -s UUID -o value ${CONTAINERSDEVICE}) ${CONTAINERSDIR} xfs defaults,discard 0 2 >> /etc/fstab
|
||||
|
||||
mount -a
|
||||
EOF
|
||||
|
||||
# Disks multizone and single zone support
|
||||
eval "$MYZONES_LIST"
|
||||
|
||||
for i in $(seq 0 $(($CNS_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute disks create ${CLUSTERID}-cns-${i}-containers \
|
||||
--type=pd-ssd --size=${CNSCONTAINERSSIZE} --zone=${zone[$i]}
|
||||
gcloud compute disks create ${CLUSTERID}-cns-${i}-gluster \
|
||||
--type=pd-ssd --size=${CNSGLUSTERSIZE} --zone=${zone[$i]}
|
||||
done
|
||||
|
||||
# CNS instances multizone and single zone support
|
||||
for i in $(seq 0 $(($CNS_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances create ${CLUSTERID}-cns-${i} \
|
||||
--machine-type=${CNSSIZE} \
|
||||
--subnet=${CLUSTERID_SUBNET} \
|
||||
--address="" --no-public-ptr \
|
||||
--maintenance-policy=MIGRATE \
|
||||
--scopes=https://www.googleapis.com/auth/cloud.useraccounts.readonly,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.read_write,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/servicecontrol\
|
||||
--tags=${CLUSTERID}-cns,${CLUSTERID}-node,${CLUSTERID}ocp \
|
||||
--metadata "ocp-cluster=${CLUSTERID},${CLUSTERID}-type=cns" \
|
||||
--image=${RHELIMAGE} --image-project=${IMAGEPROJECT} \
|
||||
--boot-disk-size=${CNSDISKSIZE} --boot-disk-type=pd-ssd \
|
||||
--boot-disk-device-name=${CLUSTERID}-cns-${i} \
|
||||
--disk=name=${CLUSTERID}-cns-${i}-containers,device-name=${CLUSTERID}-cns-${i}-containers,mode=rw,boot=no \
|
||||
--disk=name=${CLUSTERID}-cns-${i}-gluster,device-name=${CLUSTERID}-cns-${i}-gluster,mode=rw,boot=no \
|
||||
--metadata-from-file startup-script=./cns.sh \
|
||||
--zone=${zone[$i]}
|
||||
done
|
||||
|
||||
sleep 180
|
||||
|
||||
eval "$MYZONES_LIST"
|
||||
|
||||
# Masters
|
||||
for i in $(seq 0 $((${MASTER_NODE_COUNT}-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances remove-metadata \
|
||||
--keys startup-script ${CLUSTERID}-master-${i} --zone=${zone[$i]}
|
||||
done
|
||||
|
||||
# Application nodes
|
||||
for i in $(seq 0 $(($APP_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances remove-metadata \
|
||||
--keys startup-script ${CLUSTERID}-app-${i} --zone=${zone[$i]}
|
||||
done
|
||||
|
||||
# Infrastructure nodes
|
||||
for i in $(seq 0 $(($INFRA_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances remove-metadata \
|
||||
--keys startup-script ${CLUSTERID}-infra-${i} --zone=${zone[$i]}
|
||||
done
|
||||
|
||||
# CNS nodes
|
||||
for i in $(seq 0 $(($CNS_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances remove-metadata \
|
||||
--keys startup-script ${CLUSTERID}-cns-${i} --zone=${zone[$i]}
|
||||
done
|
||||
|
||||
gcloud compute firewall-rules create \
|
||||
${CLUSTERID}-prometheus-infranode-to-node \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=tcp:9100,tcp:10250 \
|
||||
--source-tags=${CLUSTERID}-infra --target-tags=${CLUSTERID}-node
|
||||
|
||||
gcloud compute firewall-rules create \
|
||||
${CLUSTERID}-prometheus-infranode-to-master \
|
||||
--direction=INGRESS --priority=1000 --network=${CLUSTERID_NETWORK} \
|
||||
--action=ALLOW --rules=tcp:8444 \
|
||||
--source-tags=${CLUSTERID}-infra --target-tags=${CLUSTERID}-master
|
||||
|
||||
echo "Finished provisioning infrastructure objects"
|
||||
|
||||
exit 0
|
||||
183
reference-architecture/gcp/3.9/delete_infrastructure.sh
Executable file
@@ -0,0 +1,183 @@
|
||||
#!/bin/bash
|
||||
set -eo pipefail
|
||||
|
||||
warnuser(){
|
||||
cat << EOF
|
||||
###########
|
||||
# WARNING #
|
||||
###########
|
||||
This script is distributed WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND
|
||||
Refer to the official documentation
|
||||
https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_google_cloud_platform/
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
die(){
|
||||
echo "$1"
|
||||
exit $2
|
||||
}
|
||||
|
||||
usage(){
|
||||
warnuser
|
||||
echo "$0 <vars_file>"
|
||||
echo " vars_file The file containing all the required variables"
|
||||
echo "Examples:"
|
||||
echo " $0 myvars"
|
||||
}
|
||||
|
||||
if [[ ( $@ == "--help") || $@ == "-h" ]]
|
||||
then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ $# -lt 1 ]]
|
||||
then
|
||||
usage
|
||||
die "vars_file not provided" 2
|
||||
fi
|
||||
|
||||
for i in gcloud gsutil
|
||||
do
|
||||
command -v $i >/dev/null 2>&1 || die "$i required but not found" 3
|
||||
done
|
||||
|
||||
warnuser
|
||||
|
||||
VARSFILE=${1}
|
||||
|
||||
if [[ ! -f ${VARSFILE} ]]
|
||||
then
|
||||
usage
|
||||
die "vars_file not found" 2
|
||||
fi
|
||||
|
||||
read -p "Are you sure you want to delete all your OCP infrastructure? " -n 1 -r
|
||||
echo # (optional) move to a new line
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]
|
||||
then
|
||||
die "User cancel" 4
|
||||
fi
|
||||
|
||||
export CLOUDSDK_CORE_DISABLE_PROMPTS=1
|
||||
|
||||
source ${VARSFILE}
|
||||
|
||||
eval "$MYZONES_LIST"
|
||||
# Bucket
|
||||
gsutil rb gs://${CLUSTERID}-registry
|
||||
# Apps LB
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-any-to-routers
|
||||
gcloud compute forwarding-rules delete ${CLUSTERID}-infra-https --region ${REGION}
|
||||
gcloud compute forwarding-rules delete ${CLUSTERID}-infra-http --region ${REGION}
|
||||
gcloud compute target-pools delete ${CLUSTERID}-infra
|
||||
gcloud compute http-health-checks delete ${CLUSTERID}-infra-lb-healthcheck
|
||||
# Masters LB
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-healthcheck-to-lb
|
||||
gcloud compute forwarding-rules delete ${CLUSTERID}-master-lb-forwarding-rule \
|
||||
--global
|
||||
gcloud compute target-tcp-proxies delete ${CLUSTERID}-master-lb-target-proxy
|
||||
gcloud compute backend-services delete ${CLUSTERID}-master-lb-backend --global
|
||||
for i in $(seq 0 $((${#ZONES[@]}-1))); do
|
||||
ZONE=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instance-groups unmanaged delete ${CLUSTERID}-masters-${ZONE} \
|
||||
--zone=${ZONE}
|
||||
done
|
||||
gcloud compute health-checks delete ${CLUSTERID}-master-lb-healthcheck
|
||||
# App instances
|
||||
for i in $(seq 0 $(($APP_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances delete ${CLUSTERID}-app-${i} --zone=${zone[$i]}
|
||||
done
|
||||
# App disk
|
||||
for i in $(seq 0 $(($APP_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute disks delete ${CLUSTERID}-app-${i}-containers \
|
||||
--zone=${zone[$i]}
|
||||
gcloud compute disks delete ${CLUSTERID}-app-${i}-local \
|
||||
--zone=${zone[$i]}
|
||||
done
|
||||
# Infra instances
|
||||
for i in $(seq 0 $(($INFRA_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances delete ${CLUSTERID}-infra-${i} --zone=${zone[$i]}
|
||||
done
|
||||
for i in $(seq 0 $(($INFRA_NODE_COUNT-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute disks delete ${CLUSTERID}-infra-${i}-containers \
|
||||
--zone=${zone[$i]}
|
||||
gcloud compute disks delete ${CLUSTERID}-infra-${i}-local \
|
||||
--zone=${zone[$i]}
|
||||
done
|
||||
# Masters
|
||||
for i in $(seq 0 $((${MASTER_NODE_COUNT}-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances delete ${CLUSTERID}-master-${i} --zone=${zone[$i]}
|
||||
done
|
||||
for i in $(seq 0 $((${MASTER_NODE_COUNT}-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute disks delete ${CLUSTERID}-master-${i}-etcd \
|
||||
--zone=${zone[$i]}
|
||||
gcloud compute disks delete ${CLUSTERID}-master-${i}-containers \
|
||||
--zone=${zone[$i]}
|
||||
gcloud compute disks delete ${CLUSTERID}-master-${i}-local \
|
||||
--zone=${zone[$i]}
|
||||
done
|
||||
# CNS
|
||||
for i in $(seq 0 $((${CNS_NODE_COUNT}-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute instances delete ${CLUSTERID}-cns-${i} --zone=${zone[$i]}
|
||||
done
|
||||
for i in $(seq 0 $((${CNS_NODE_COUNT}-1))); do
|
||||
zone[$i]=${ZONES[$i % ${#ZONES[@]}]}
|
||||
gcloud compute disks delete ${CLUSTERID}-cns-${i}-containers \
|
||||
--zone=${zone[$i]}
|
||||
gcloud compute disks delete ${CLUSTERID}-cns-${i}-gluster \
|
||||
--zone=${zone[$i]}
|
||||
done
|
||||
# BASTION
|
||||
gcloud compute instances delete ${CLUSTERID}-bastion
|
||||
# DNS records
|
||||
export LBIP=$(gcloud compute addresses list \
|
||||
--filter="name:${CLUSTERID}-master-lb" --format="value(address)")
|
||||
gcloud dns record-sets transaction start --zone=${DNSZONE}
|
||||
gcloud dns record-sets transaction remove \
|
||||
${LBIP} --name=${CLUSTERID}-ocp.${DOMAIN} --ttl=${TTL} --type=A --zone=${DNSZONE}
|
||||
gcloud dns record-sets transaction execute --zone=${DNSZONE}
|
||||
|
||||
export APPSLBIP=$(gcloud compute addresses list \
|
||||
--filter="name:${CLUSTERID}-apps-lb" --format="value(address)")
|
||||
gcloud dns record-sets transaction start --zone=${DNSZONE}
|
||||
gcloud dns record-sets transaction remove \
|
||||
${APPSLBIP} --name=\*.${CLUSTERID}-apps.${DOMAIN} --ttl=${TTL} --type=A --zone=${DNSZONE}
|
||||
gcloud dns record-sets transaction execute --zone=${DNSZONE}
|
||||
|
||||
export BASTIONIP=$(gcloud compute addresses list \
|
||||
--filter="name:${CLUSTERID}-bastion" --format="value(address)")
|
||||
gcloud dns record-sets transaction start --zone=${DNSZONE}
|
||||
gcloud dns record-sets transaction remove \
|
||||
${BASTIONIP} --name=${CLUSTERID}-bastion.${DOMAIN} --ttl=${TTL} --type=A --zone=${DNSZONE}
|
||||
gcloud dns record-sets transaction execute --zone=${DNSZONE}
|
||||
|
||||
# External IPs
|
||||
gcloud compute addresses delete ${CLUSTERID}-master-lb --global
|
||||
gcloud compute addresses delete ${CLUSTERID}-apps-lb --region ${REGION}
|
||||
gcloud compute addresses delete ${CLUSTERID}-bastion --region ${REGION}
|
||||
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-external-to-bastion
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-node-to-node
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-node-to-master
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-any-to-masters
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-master-to-node
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-master-to-master
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-bastion-to-any
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-infra-to-infra
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-infra-to-node
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-cns-to-cns
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-node-to-cns
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-prometheus-infranode-to-node
|
||||
gcloud compute firewall-rules delete ${CLUSTERID}-prometheus-infranode-to-master
|
||||
|
||||
gcloud compute networks subnets delete ${CLUSTERID_SUBNET}
|
||||
gcloud compute networks delete ${CLUSTERID_NETWORK}
|
||||
56
reference-architecture/gcp/3.9/infrastructure.vars
Normal file
@@ -0,0 +1,56 @@
|
||||
# Google Project ID
|
||||
export PROJECTID="refarch-204310"
|
||||
# Google Region
|
||||
export REGION="us-west1"
|
||||
export DEFAULTZONE="us-west1-a"
|
||||
# For multizone deployments
|
||||
ZONES=("us-west1-a" "us-west1-b" "us-west1-c")
|
||||
# For single zone deployments
|
||||
# ZONES=("us-west1-a")
|
||||
export MYZONES_LIST="$(declare -p ZONES)"
|
||||
# OpenShift Cluster ID
|
||||
export CLUSTERID="refarch"
|
||||
# Network and subnet configuration
|
||||
export CLUSTERID_NETWORK="${CLUSTERID}-net"
|
||||
export CLUSTERID_SUBNET="${CLUSTERID}-subnet"
|
||||
# Subnet CIDR, modify if needed
|
||||
export CLUSTERID_SUBNET_CIDR="10.240.0.0/24"
|
||||
# DNS
|
||||
export DNSZONE="example-com"
|
||||
export DOMAIN="gce.example.com."
|
||||
export TTL=3600
|
||||
# RHEL image to be used
|
||||
export RHELIMAGE="${CLUSTERID}-rhel-image"
|
||||
export IMAGEPROJECT="${PROJECTID}"
|
||||
# Bastion settings
|
||||
export BASTIONDISKSIZE="20GB"
|
||||
export BASTIONSIZE="g1-small"
|
||||
# Master nodes settings
|
||||
export MASTER_NODE_COUNT=3
|
||||
export MASTERDISKSIZE="40GB"
|
||||
export MASTERSIZE="n1-standard-8"
|
||||
export ETCDSIZE="50GB"
|
||||
export MASTERCONTAINERSSIZE="20GB"
|
||||
export MASTERLOCALSIZE="30GB"
|
||||
# Infra nodes settings
|
||||
export INFRA_NODE_COUNT=3
|
||||
export INFRADISKSIZE="40GB"
|
||||
# By default, 8Gi RAM is required to run elasticsearch pods
|
||||
# as part of the aggregated logging component
|
||||
export INFRASIZE="n1-standard-8"
|
||||
export INFRACONTAINERSSIZE="20GB"
|
||||
export INFRALOCALSIZE="30GB"
|
||||
# App nodes settings
|
||||
export APP_NODE_COUNT=3
|
||||
export APPDISKSIZE="40GB"
|
||||
export APPSIZE="n1-standard-2"
|
||||
export APPCONTAINERSSIZE="20GB"
|
||||
export APPLOCALSIZE="30GB"
|
||||
# CNS nodes settings
|
||||
export CNS_NODE_COUNT=3
|
||||
export CNSDISKSIZE="40GB"
|
||||
# By default, 8Gi RAM is required to run CNS nodes
|
||||
export CNSSIZE="n1-standard-8"
|
||||
export CNSCONTAINERSSIZE="20GB"
|
||||
export CNSGLUSTERSIZE="100GB"
|
||||
@@ -1,5 +1,7 @@
|
||||
# The Reference Architecture OpenShift on Google Cloud Platform
|
||||
|
||||
## **NOTE: This repository contains deprecated scripts and ansible playbooks. Refer to the official documentation [Deploying and Managing OpenShift 3.9 on Google Cloud Platform](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_google_cloud_platform/)**
|
||||
|
||||
This repository contains the code used to deploy an OpenShift Container Platform or OpenShift Origin environment based on the [Reference Architecture Guide for OCP 3 on Google Cloud Platform](https://access.redhat.com/articles/2751521).
|
||||
|
||||
## Overview
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
== Scripted Installation of OpenShift on OpenStack.
|
||||
|
||||
## **NOTE: This repository contains deprecated scripts and ansible playbooks. Refer to the official documentation https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_red_hat_openstack_platform_10/[Deploying and Managing OpenShift 3.9 on Red Hat OpenStack Platform 10]**
|
||||
|
||||
The scripts presented here match those in https://access.redhat.com/articles/2743631[Red Hat OpenShift Container Platform 3 on Red Hat OpenStack Platform 8] reference architecture document. These script fragments are intended to demonstrate the steps needed to deploy OCP on OSP, but they are not the recommended method for doing so. Production deployment should be done using the https://github.com/openshift/openshift-on-openstack[OpenShift on OpenStack] Heat templates and the https://github.com/openshift/openshift-ansible[OpenShift Ansible] playbooks.
|
||||
|
||||
The documentation here does not provide comprehensive usage
|
||||
@@ -29,7 +31,7 @@ The `APP_DNS_SUFFIX` domain name is a _wildcard record_. The hostname
|
||||
portion is an asterisk (*). This will match any name in that
|
||||
subdomain.
|
||||
|
||||
.Proxy DNS Records
|
||||
.Proxy DNS Records
|
||||
----
|
||||
proxy.ocp3.example.com. 300 IN A 10.19.XX.XX
|
||||
devs.ocp3.example.com. 300 IN A 10.19.XX.XX
|
||||
@@ -68,7 +70,7 @@ export OCP3_KEY_NAME=ocp3
|
||||
export OCP3_KEY_FILE=~/keys/ocp3_rsa
|
||||
export PUBLIC_NETWORK=public_network
|
||||
|
||||
# Naming and DNS
|
||||
# Naming and DNS
|
||||
export OCP3_DOMAIN=ocp3.example.com
|
||||
export OCP3_DNS_SERVER=10.19.X.X
|
||||
export OCP3_DNS_UPDATE_KEY=~/keys/dns-update.key
|
||||
@@ -90,7 +92,7 @@ subscription pool which contains the OCP and OSP repositories for the
|
||||
selected versions.
|
||||
|
||||
Each host must be registered to receive software from the Red Hat RPM
|
||||
repositories.
|
||||
repositories.
|
||||
|
||||
* `rhel-7-server-ose-${OCP_VERSION}-rpms`
|
||||
* `rhel-7-server-openstack-${OSP_VERSION}-rpms`
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
== DNS Service for OpenStack with Heat
|
||||
|
||||
NOTE: This repository contains deprecated scripts and ansible playbooks. Refer to the official documentation https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_red_hat_openstack_platform_10/[Deploying and Managing OpenShift 3.9 on Red Hat OpenStack Platform 10]
|
||||
|
||||
This repository defines a simple distributed DNS service within an
|
||||
OpenStack Heat stack. The goal is to de-mystify DNS services and
|
||||
deployments for low level delegated sub-domains. It is NOT to provide
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
# Reference Architecture: OpenShift Container Platform on Red Hat Virtualization
|
||||
This repository contains the Ansible playbooks used to deploy
|
||||
This subdirectory contains the Ansible playbooks used to deploy
|
||||
an OpenShift Container Platform environment on Red Hat Virtualization
|
||||
|
||||
Current versions:
|
||||
|
||||
* OpenShift Container Platform 3.9
|
||||
* Red Hat Virtualization 4.2 (beta)
|
||||
* Red Hat Enterprise Linux 7.5
|
||||
|
||||
## Overview
|
||||
This reference architecture provides a comprehensive example demonstrating how Red Hat OpenShift Container Platform
|
||||
can be set up to take advantage of the native high availability capabilities of Kubernetes and Red Hat Virtualization
|
||||
@@ -9,15 +15,15 @@ in order to create a highly available OpenShift Container Platform environment.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Preparing the Deployment Host
|
||||
### Preparing the Bastion Host
|
||||
|
||||
Ensure the deployment host (aka workstation host) is running Red Hat Enterprise
|
||||
Linux 7 and is registered and subscribed to at least the following channels:
|
||||
Ensure the bastion host is running Red Hat Enterprise Linux 7 and is registered and
|
||||
subscribed to at least the following channels:
|
||||
|
||||
* rhel-7-server-rpms
|
||||
* rhel-7-server-extras-rpms
|
||||
|
||||
The following commands should be issued from the deployment host (by preference from a
|
||||
The following commands should be issued from the bastion host (by preference from a
|
||||
regular user account with sudo access):
|
||||
|
||||
```
|
||||
@@ -27,13 +33,17 @@ $ cd ~/git/ && git clone https://github.com/openshift/openshift-ansible-contrib
|
||||
$ cd ~/git/openshift-ansible-contrib && ansible-playbook playbooks/deploy-host.yaml -e provider=rhv
|
||||
```
|
||||
|
||||
All subsequent work will be performed from the reference-architecture/rhv-ansible subdirectory.
|
||||
|
||||
### oVirt Ansible roles
|
||||
[oVirt Ansible roles](https://github.com/ovirt/ovirt-ansible) will be installed
|
||||
RPMs providing the [oVirt Ansible roles](https://github.com/ovirt/ovirt-ansible) will be installed
|
||||
into your system's Ansible role path, typically `/usr/share/ansible/roles`.
|
||||
These are required for playbooks to interact with RHV/oVirt to create VMs.
|
||||
|
||||
### Dynamic Inventory
|
||||
A copy of `ovirt4.py` from the Ansible project is provided under the inventory directory. This script will, given credentials to a RHV 4 engine, populate the Ansible inventory with facts about all virtual machines in the cluster. In order to use this dynamic inventory, see the `ovirt.ini.example` file, either providing the relevant Python secrets via environment variables, or by copying it to `ovirt.ini` and filling in the values.
|
||||
A copy of [`ovirt4.py`](inventory/ovirt4.py) from the Ansible project is provided under the [`reference-architecture/rhv-ansible/inventory`](inventory) directory. This script will, given credentials to a RHV 4 engine, populate the Ansible inventory with facts about all virtual machines in the cluster. In order to use this dynamic inventory, see the [`ovirt.ini.example`](inventory/ovirt.ini.example) file, either providing the relevant Python secrets via environment variables, or by copying it to `ovirt.ini` and filling in the values.
|
||||
|
||||
This reference architecture uses the dynamic inventory to establish DNS entries in the form of either an /etc/hosts file or nsupdate script for the provisioned virtual machines. All other playbooks are performed using a static inventory. If DNS updates are to be performed manually, the dynamic inventory script may be unnecessary.
|
||||
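As a sketch of that workflow (paths as linked in this README; the `--list` flag is the conventional dynamic-inventory entry point and is assumed here rather than taken from this change):

```
$ cd reference-architecture/rhv-ansible
$ cp inventory/ovirt.ini.example inventory/ovirt.ini    # fill in engine URL, user, password, CA file
$ python inventory/ovirt4.py --list | head              # sanity-check that VM facts are returned
```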
|
||||
### Red Hat Virtualization Certificate
|
||||
A copy of the `/etc/pki/ovirt-engine/ca.pem` from the RHV engine will need to be added to the
|
||||
@@ -46,31 +56,53 @@ $ curl --output ca.pem 'http://engine.example.com/ovirt-engine/services/pki-reso
|
||||
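For reference, the full form of the curl command truncated in the hunk header above, using the pki-resource URL format noted elsewhere in this change (the engine hostname is a placeholder):

```
$ curl --output ca.pem 'https://engine.example.com/ovirt-engine/services/pki-resource?resource=ca-certificate&format=X509-PEM-CA'
```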
|
||||
### RHEL QCOW2 Image
|
||||
The oVirt-ansible role, oVirt.image-template requires a URL to download a QCOW2 KVM image to use as
|
||||
the basis for the VMs on which OpenShift will be installed. If a CentOS image is desired, a suitable
|
||||
URL is commented out in the variable file, `playbooks/vars/ovirt-infra-vars.yaml`. If a RHEL image
|
||||
is preferred, log in at <https://access.redhat.com/>, navigate to Downloads, Red Hat Enterprise Linux,
|
||||
select the latest release (at this time, 7.3), and copy the URL for "KVM Guest Image". It is
|
||||
the basis for the VMs on which OpenShift will be installed.
|
||||
|
||||
If a CentOS image is desired, a suitable URL is commented out in the static inventory under `localhost`.
|
||||
|
||||
If a RHEL image is preferred, log in at <https://access.redhat.com/>, navigate to Downloads, Red Hat Enterprise Linux,
|
||||
select the latest release (at this time, 7.5), and copy the URL for "KVM Guest Image". If possible, download
|
||||
this file to the bastion host, and set the `image_path` variable to its location. Otherwise, it is
|
||||
preferable to download the image to a local server, e.g. the /pub/ directory of a satellite if
|
||||
available, and provide that URL to the Ansible playbook, because the download link will expire
|
||||
after a short while and need to be refreshed.
|
||||
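A sketch of that download step (the URL placeholder is the time-limited "KVM Guest Image" link copied from the Customer Portal; the target path matches the default `image_path` and `template_name=rhel75` values in the example inventory):

```
$ curl -L -o ~/Downloads/rhel75.qcow2 '<KVM Guest Image URL copied from access.redhat.com>'
```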
|
||||
### Ansible Vault
|
||||
A number of variables used by the OpenShift and oVirt Ansible installers are prefixed with `vault_`. Those
|
||||
variables are expected to be populated in an Ansible Vault file and stored in a safe location.
|
||||
For more information, please see the
|
||||
[Ansible Vault Documentation](http://docs.ansible.com/ansible/2.5/user_guide/vault.html).
|
||||
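A minimal sketch of creating such a vault file; the `vault_` names listed are the ones referenced by the example inventories and cloud-init scripts in this change, and all values are placeholders:

```
$ ansible-vault create ~/vault.yaml
# then, inside the editor, add entries along these lines:
#   vault_engine_url: https://engine.example.com/ovirt-engine/api
#   vault_engine_user: admin@internal
#   vault_engine_password: <engine password>
#   vault_rhsub_user: <RHSM user>            # RHEL hosts only
#   vault_rhsub_password: <RHSM password>
#   vault_rhsub_pool: Red Hat OpenShift Container Platform*
#   vault_rhsub_server: <RHSM or Satellite hostname>
```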
|
||||
## Usage
|
||||
|
||||
Edit the `ocp-vars.yaml` file in this directory, and fill in any blank values.
|
||||
### Populate Values
|
||||
|
||||
Check variables listed in `playbooks/vars/ovirt-infra-vars.yaml`
|
||||
Three files will need to be copied from examples and edited:
|
||||
|
||||
* As mentioned above, protected values should be created in an Ansible Vault file, e.g. [`vault.yaml`](vault.yaml) in the user's home directory. A template is provided in the examples directory. This will hold RHV credentials and, in the case of RHEL hosts, subscription credentials.
|
||||
|
||||
* The [`ovirt-infra-vars.yaml`](ovirt-infra-vars.yaml) file defines the virtual machines created by the `ovirt-vm-infra.yaml` playbook. The host names created here must match those in the static inventory.
|
||||
|
||||
* A copy of a static inventory is provided as [yaml](example/inventory.yaml) or [ini](example/inventory), populated with hosts in the example.com domain along with variables pertaining to the reference architecture. This inventory should be added to /etc/ansible/hosts (or passed manually using the -i flag during each `ansible-playbook` run).
|
||||
|
||||
### Set up virtual machines in RHV
|
||||
From the `reference-architecture/rhv-ansible` directory, run
|
||||
|
||||
```
|
||||
ansible-playbook playbooks/ovirt-vm-infra.yaml -e@ocp-vars.yaml
|
||||
ansible-playbook -e@~/vault.yaml playbooks/ovirt-vm-infra.yaml
|
||||
```
|
||||
|
||||
### Set up OpenShift Container Platform on the VMs from the previoius step
|
||||
### Optionally output DNS entries and update DNS records with dynamically provisioned information (note the use of two inventories here; localhost variables like `openshift_master_default_subdomain` are required to form the output files)
|
||||
|
||||
```
|
||||
ansible-playbook playbooks/openshift-install.yaml -e@ocp-vars.yaml
|
||||
ansible-playbook -e@~/vault.yaml -i /etc/ansible/hosts -i inventory playbooks/output-dns.yaml
|
||||
|
||||
```
|
||||
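If an nsupdate script was generated, it can be applied with `nsupdate` and the DNS server's update key, as the test script later in this change does (the key path and output file name mirror that script and may differ per environment):

```
$ nsupdate -k /etc/rndc.key inventory.nsupdate
```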
|
||||
### Set up OpenShift Container Platform on the VMs from the previous step
|
||||
|
||||
```
|
||||
ansible-playbook -e@~/vault.yaml /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
|
||||
|
||||
ansible-playbook -e@~/vault.yaml /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml
|
||||
```
|
||||
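An optional post-install sanity check, not part of the playbooks in this change (run on a master, or anywhere `oc` is logged in with cluster-admin):

```
$ oc get nodes
$ oc get pods -n default    # router and registry pods should be Running
```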
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
[defaults]
|
||||
forks = 50
|
||||
host_key_checking = False
|
||||
inventory = inventory/
|
||||
inventory_ignore_extensions = .example, .ini, .pyc, .pem
|
||||
gathering = smart
|
||||
# Roles path assumes oVirt-ansible roles installed to /usr/share/ansible/roles via RPM
|
||||
# per instructions at: https://github.com/oVirt/ovirt-ansible
|
||||
roles_path = ./playbooks/roles:../../roles:/usr/share/ansible/roles
|
||||
roles_path = ../../roles:/usr/share/ansible/roles
|
||||
remote_user = root
|
||||
retry_files_enabled=False
|
||||
log_path=./ansible.log
|
||||
vault_password_file=~/.test_vault_pw
|
||||
|
||||
#[ssh_connection]
|
||||
#ssh_args = -C -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no -o PreferredAuthentications=publickey
|
||||
|
||||
@@ -0,0 +1,22 @@
|
||||
---
|
||||
- name: Docker Image Pre-Pull
|
||||
hosts: nodes
|
||||
vars:
|
||||
ose_tag: v3.9.15
|
||||
images:
|
||||
- ose-ansible
|
||||
- ose-cluster-capacity
|
||||
- ose-deployer
|
||||
- ose-docker-builder
|
||||
- ose-docker-registry
|
||||
- ose-haproxy-router
|
||||
- ose-pod
|
||||
- ose
|
||||
- node
|
||||
registry_prefix: registry.access.redhat.com/openshift3/
|
||||
tasks:
|
||||
- docker_image:
|
||||
name: "{{ registry_prefix }}{{ item }}"
|
||||
tag: "{{ ose_tag }}"
|
||||
with_items: "{{ images }}"
|
||||
...
|
||||
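A hedged way to confirm the pre-pull worked on a node; the image names follow the `registry_prefix` and `ose_tag` values in the play above, and this check is not part of the change itself:

```
$ docker images | grep 'registry.access.redhat.com/openshift3/' | grep v3.9.15
```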
130
reference-architecture/rhv-ansible/example/inventory
Normal file
@@ -0,0 +1,130 @@
|
||||
[all:vars]
|
||||
app_dns_prefix=apps
|
||||
public_hosted_zone=example.com
|
||||
load_balancer_hostname=lb.{{public_hosted_zone}}
|
||||
openshift_master_cluster_hostname="{{ load_balancer_hostname }}"
|
||||
openshift_master_cluster_public_hostname=openshift-master.{{ public_hosted_zone }}
|
||||
openshift_master_default_subdomain="{{ app_dns_prefix }}.{{ public_hosted_zone }}"
|
||||
openshift_public_hostname="{{openshift_master_cluster_public_hostname}}"
|
||||
|
||||
[workstation]
|
||||
localhost ansible_connection=local
|
||||
|
||||
[workstation:vars]
|
||||
# RHV Engine
|
||||
engine_url="{{ vault_engine_url }}"
|
||||
engine_user="{{ vault_engine_user }}"
|
||||
engine_password="{{ vault_engine_password }}"
|
||||
# CA file copied from engine:/etc/pki/ovirt-engine/ca.pem
|
||||
# path is relative to playbook directory
|
||||
engine_cafile=../ca.pem
|
||||
|
||||
# QCOW2 KVM Guest Image
|
||||
#qcow_url=https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2c
|
||||
qcow_url=https://access.cdn.redhat.com//content/origin/files/XXXX/rhel-server-7.5-x86_64-kvm.qcow2?_auth_=XXXX
|
||||
template_name=rhel75
|
||||
image_path="{{ lookup('env', 'HOME') }}/Downloads/{{ template_name }}.qcow2"
|
||||
|
||||
# RHV VM Cluster Info
|
||||
rhv_cluster=Default
|
||||
rhv_data_storage=vmstore
|
||||
root_ssh_key="{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
|
||||
|
||||
# DNS server for nsupdate
|
||||
# nsupdate_server=localhost
|
||||
|
||||
|
||||
[OSEv3:children]
|
||||
nodes
|
||||
masters
|
||||
etcd
|
||||
lb
|
||||
|
||||
[OSEv3:vars]
|
||||
# General variables
|
||||
ansible_ssh_user=root
|
||||
console_port=8443
|
||||
debug_level=2
|
||||
deployment_type=openshift-enterprise
|
||||
openshift_debug_level="{{ debug_level }}"
|
||||
openshift_deployment_type="{{ deployment_type }}"
|
||||
openshift_master_cluster_method=native
|
||||
openshift_node_debug_level="{{ node_debug_level | default(debug_level, true) }}"
|
||||
openshift_release=3.9
|
||||
openshift_vers=v3_9
|
||||
|
||||
# RHV Specific Settings
|
||||
openshift_enable_service_catalog=False
|
||||
# openshift_cloudprovider_kind=ovirt
|
||||
# openshift_cloudprovider_ovirt_auth_url=
|
||||
# openshift_cloudprovider_ovirt_username=
|
||||
# openshift_cloudprovider_ovirt_password=
|
||||
|
||||
# Docker
|
||||
container_runtime_docker_storage_setup_device=/dev/vdb
|
||||
container_runtime_docker_storage_type=overlay2
|
||||
openshift_docker_use_system_container=False
|
||||
openshift_node_local_quota_per_fsgroup=512Mi
|
||||
openshift_use_system_containers=False
|
||||
oreg_url="registry.access.redhat.com/openshift3/ose-${component}:${version}"
|
||||
openshift_examples_modify_imagestreams=true
|
||||
|
||||
# Pod Networking
|
||||
os_sdn_network_plugin_name=redhat/openshift-ovs-networkpolicy
|
||||
|
||||
|
||||
# Registry
|
||||
openshift_hosted_registry_replicas=1
|
||||
openshift_hosted_registry_storage_kind=nfs
|
||||
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
|
||||
openshift_hosted_registry_selector='region=infra'
|
||||
openshift_hosted_registry_storage_host=
|
||||
openshift_hosted_registry_storage_nfs_directory=/var/lib/exports
|
||||
openshift_hosted_registry_storage_volume_name=registryvol
|
||||
openshift_hosted_registry_storage_volume_size=20Gi
|
||||
|
||||
# Authentication
|
||||
openshift_master_identity_providers="[{'name': 'htpasswd_auth', 'login': 'True', 'challenge': 'True', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]"
|
||||
openshift_master_htpasswd_users={'myuser': '$apr1$zAhyA9Ko$rBxBOwAwwtRuuaw8OtCwH0'}
|
||||
|
||||
# Red Hat Subscription Management
|
||||
rhsub_pool=Red Hat OpenShift Container Platform*
|
||||
rhsub_user="{{ vault_rhsub_user }}"
|
||||
rhsub_password="{{ vault_rhsub_password }}"
|
||||
|
||||
# Load Balancer Config
|
||||
# Using yaml style syntax here as this looks horrible in embedded json
|
||||
openshift_loadbalancer_additional_frontends=[{"name":"apps-http","option":"tcplog","binds":["*:80"],"default_backend":"apps-http"},{"name":"apps-https","option":"tcplog","binds":["*:443"],"default_backend":"apps-http"}]
|
||||
openshift_loadbalancer_additional_backends=[{"name":"apps-http","balance":"source","servers":[{"name":"infra0","address":"{{ groups['infras'].0 }}:80","opts":"check"},{"name":"infra1","address":"{{ groups['infras'].1 }}:80","opts":"check"},{"name":"infra2","address":"{{ groups['infras'].2 }}:80","opts":"check"}]},{"name":"apps-https","balance":"source","servers":[{"name":"infra0","address":"{{ groups['infras'].0 }}:443","opts":"check"},{"name":"infra1","address":"{{ groups['infras'].1 }}:443","opts":"check"},{"name":"infra2","address":"{{ groups['infras'].2 }}:443","opts":"check"}]}]
|
||||
|
||||
[masters]
|
||||
master0.example.com
|
||||
master1.example.com
|
||||
master2.example.com
|
||||
|
||||
[etcd]
|
||||
master0.example.com
|
||||
master1.example.com
|
||||
master2.example.com
|
||||
|
||||
[infras]
|
||||
infra0.example.com
|
||||
infra1.example.com
|
||||
infra2.example.com
|
||||
|
||||
[lb]
|
||||
lb.example.com
|
||||
|
||||
[nodes]
|
||||
master0.example.com openshift_node_labels="{'region': 'master'}" openshift_hostname=master0.example.com
|
||||
master1.example.com openshift_node_labels="{'region': 'master'}" openshift_hostname=master1.example.com
|
||||
master2.example.com openshift_node_labels="{'region': 'master'}" openshift_hostname=master2.example.com
|
||||
infra0.example.com openshift_node_labels="{'region': 'infra'}" openshift_hostname=infra0.example.com
|
||||
infra1.example.com openshift_node_labels="{'region': 'infra'}" openshift_hostname=infra1.example.com
|
||||
infra2.example.com openshift_node_labels="{'region': 'infra'}" openshift_hostname=infra2.example.com
|
||||
app0.example.com openshift_node_labels="{'region': 'primary'}" openshift_hostname=app0.example.com
|
||||
app1.example.com openshift_node_labels="{'region': 'primary'}" openshift_hostname=app1.example.com
|
||||
app2.example.com openshift_node_labels="{'region': 'primary'}" openshift_hostname=app2.example.com
|
||||
lb.example.com
|
||||
|
||||
# vim: set syntax=dosini
|
||||
@@ -1,5 +1,37 @@
|
||||
---
|
||||
all:
|
||||
vars:
|
||||
app_dns_prefix: apps
|
||||
public_hosted_zone: example.com
|
||||
load_balancer_hostname: lb.{{public_hosted_zone}}
|
||||
openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
|
||||
openshift_master_cluster_public_hostname: openshift-master.{{ public_hosted_zone }}
|
||||
openshift_master_default_subdomain: "{{ app_dns_prefix }}.{{ public_hosted_zone }}"
|
||||
openshift_public_hostname: "{{openshift_master_cluster_public_hostname}}"
|
||||
hosts:
|
||||
localhost:
|
||||
ansible_connection: local
|
||||
# RHV Engine
|
||||
engine_url: "{{ vault_engine_url }}"
|
||||
engine_user: "{{ vault_engine_user }}"
|
||||
engine_password: "{{ vault_engine_password }}"
|
||||
# CA file copied from engine:/etc/pki/ovirt-engine/ca.pem
|
||||
# path is relative to playbook directory
|
||||
engine_cafile: ../ca.pem
|
||||
|
||||
# QCOW2 KVM Guest Image
|
||||
#qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2c
|
||||
qcow_url: https://access.cdn.redhat.com//content/origin/files/XXX/rhel-server-7.5-x86_64-kvm.qcow2?_auth_=XXX
|
||||
template_name: rhel75
|
||||
image_path: "{{ lookup('env', 'HOME') }}/Downloads/{{ template_name }}.qcow2"
|
||||
|
||||
# RHV VM Cluster Info
|
||||
rhv_cluster: Default
|
||||
rhv_data_storage: vmstore
|
||||
root_ssh_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
|
||||
|
||||
# DNS server for nsupdate
|
||||
#nsupdate_server: localhost
|
||||
children:
|
||||
OSEv3:
|
||||
children:
|
||||
@@ -7,49 +39,81 @@ all:
|
||||
children:
|
||||
masters:
|
||||
hosts:
|
||||
openshift-master-[0:2].example.com
|
||||
master[0:2].example.com
|
||||
vars:
|
||||
openshift_node_labels: "{'role': 'master'}"
|
||||
openshift_node_labels: "{'region': 'master'}"
|
||||
openshift_schedulable: true
|
||||
apps:
|
||||
hosts:
|
||||
openshift-node-[0:1].example.com
|
||||
app[0:2].example.com
|
||||
vars:
|
||||
openshift_node_labels: "{'role': 'app'}"
|
||||
openshift_node_labels: "{'region': 'primary'}"
|
||||
openshift_schedulable: true
|
||||
infras:
|
||||
hosts:
|
||||
openshift-infra-[0:1].example.com
|
||||
infra[0:2].example.com
|
||||
vars:
|
||||
openshift_node_labels: "{'role': 'infra'}"
|
||||
openshift_node_labels: "{'region': 'infra'}"
|
||||
openshift_schedulable: true
|
||||
lb:
|
||||
hosts:
|
||||
openshift-lb.example.com
|
||||
lb.example.com
|
||||
etcd:
|
||||
children:
|
||||
masters:
|
||||
hosts:
|
||||
master[0:2].example.com
|
||||
vars:
|
||||
# General variables
|
||||
ansible_ssh_user: root
|
||||
app_dns_prefix: apps
|
||||
console_port: 8443
|
||||
debug_level: 2
|
||||
load_balancer_hostname: openshift-lb.{{public_hosted_zone}}
|
||||
openshift_additional_repos: [{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': '{{ vault_puddle_baseurl }}', 'enabled': 1, 'gpgcheck': 0}]
|
||||
openshift_debug_level: "{{ debug_level }}"
|
||||
deployment_type: openshift-enterprise
|
||||
openshift_clock_enabled: true
|
||||
openshift_debug_level: "{{ debug_level }}"
|
||||
openshift_deployment_type: "{{ deployment_type }}"
|
||||
openshift_enable_service_catalog: false
|
||||
openshift_examples_modify_imagestreams: True
|
||||
openshift_master_cluster_method: native
|
||||
openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
|
||||
openshift_release: 3.9
|
||||
openshift_vers: v3_9
|
||||
osm_default_node_selector: "region=primary"
|
||||
|
||||
# RHV Specific Settings
|
||||
openshift_enable_service_catalog: False
|
||||
# openshift_cloudprovider_kind: ovirt
|
||||
# openshift_cloudprovider_ovirt_auth_url:
|
||||
# openshift_cloudprovider_ovirt_username:
|
||||
# openshift_cloudprovider_ovirt_password:
|
||||
|
||||
# Docker
|
||||
container_runtime_docker_storage_setup_device: /dev/vdb
|
||||
container_runtime_docker_storage_type: overlay2
|
||||
openshift_docker_use_system_container: False
|
||||
openshift_node_local_quota_per_fsgroup: 512Mi
|
||||
openshift_use_system_containers: False
|
||||
oreg_url: "registry.access.redhat.com/openshift3/ose-${component}:${version}"
|
||||
openshift_examples_modify_imagestreams: true
|
||||
|
||||
# Pod Networking
|
||||
os_sdn_network_plugin_name: redhat/openshift-ovs-networkpolicy
|
||||
# DNS
|
||||
app_dns_prefix: apps
|
||||
public_hosted_zone: example.com
|
||||
load_balancer_hostname: lb.{{public_hosted_zone}}
|
||||
openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
|
||||
openshift_master_cluster_public_hostname: openshift.{{ public_hosted_zone }}
|
||||
openshift_master_default_subdomain: "{{app_dns_prefix}}.{{public_hosted_zone}}"
|
||||
openshift_public_hostname: "{{openshift_master_cluster_public_hostname}}"
|
||||
|
||||
# Registry
|
||||
openshift_hosted_registry_replicas: 1
|
||||
openshift_hosted_registry_storage_kind: nfs
|
||||
openshift_hosted_registry_replicas: 2
|
||||
openshift_hosted_registry_selector: role=infra
|
||||
openshift_hosted_registry_storage_host: 192.168.155.1
|
||||
openshift_hosted_registry_storage_access_modes: ['ReadWriteMany']
|
||||
openshift_hosted_registry_selector: region=infra
|
||||
openshift_hosted_registry_storage_host:
|
||||
openshift_hosted_registry_storage_nfs_directory: /var/lib/exports
|
||||
openshift_hosted_registry_storage_volume_name: registryvol
|
||||
openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
|
||||
openshift_master_cluster_method: native
|
||||
openshift_master_cluster_public_hostname: openshift.{{ public_hosted_zone }}
|
||||
openshift_hosted_registry_storage_volume_size: 20Gi
|
||||
|
||||
# Authentication
|
||||
openshift_master_identity_providers:
|
||||
- name: htpasswd_auth
|
||||
login: true
|
||||
@@ -57,20 +121,47 @@ all:
|
||||
kind: HTPasswdPasswordIdentityProvider
|
||||
filename: /etc/origin/master/htpasswd
|
||||
openshift_master_htpasswd_users: {'myuser': '$apr1$zAhyA9Ko$rBxBOwAwwtRuuaw8OtCwH0'}
|
||||
openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
|
||||
openshift_override_hostname_check: true
|
||||
openshift_release: 3.9
|
||||
openshift_vers: v3_9
|
||||
openshift_web_console_prefix: registry.{{ vault_oreg_project | default ("access") }}.openshift.com/openshift3/ose-
|
||||
oreg_url: "{{ vault_oreg_url }}"
|
||||
oreg_auth_user: "{{ vault_oreg_auth_user }}"
|
||||
# https://console.{{ vault_oreg_project | default ("access") }}.openshift.com/console/command-line
|
||||
oreg_auth_password: "{{ vault_oreg_auth_password }}"
|
||||
ose-ansible-service-broker: registry.{{ vault_oreg_project | default ("access") }}.openshift.com/openshift3/ose-
|
||||
osm_default_node_selector: role=app
|
||||
public_hosted_zone: example.com
|
||||
|
||||
# Red Hat Subscription Management
|
||||
rhsub_pool: Red Hat OpenShift Container Platform*
|
||||
rhsub_server: "{{ vault_rhsub_server }}"
|
||||
rhsub_user: "{{ vault_rhsub_user }}"
|
||||
rhsub_password: "{{ vault_rhsub_password }}"
|
||||
template_service_broker_prefix: registry.{{ vault_oreg_project | default ("access") }}.openshift.com/openshift3/ose-
|
||||
|
||||
# Load Balancer Config
|
||||
openshift_loadbalancer_additional_frontends:
|
||||
- name: apps-http
|
||||
option: tcplog
|
||||
binds:
|
||||
- "*:80"
|
||||
default_backend: apps-http
|
||||
- name: apps-https
|
||||
option: tcplog
|
||||
binds:
|
||||
- "*:443"
|
||||
default_backend: apps-http
|
||||
openshift_loadbalancer_additional_backends:
|
||||
- name: apps-http
|
||||
balance: source
|
||||
servers:
|
||||
- name: infra0
|
||||
address: "{{ groups['infras'].0 }}:80"
|
||||
opts: check
|
||||
- name: infra1
|
||||
address: "{{ groups['infras'].1 }}:80"
|
||||
opts: check
|
||||
- name: infra2
|
||||
address: "{{ groups['infras'].2 }}:80"
|
||||
opts: check
|
||||
- name: apps-https
|
||||
balance: source
|
||||
servers:
|
||||
- name: infra0
|
||||
address: "{{ groups['infras'].0 }}:443"
|
||||
opts: check
|
||||
- name: infra1
|
||||
address: "{{ groups['infras'].1 }}:443"
|
||||
opts: check
|
||||
- name: infra2
|
||||
address: "{{ groups['infras'].2 }}:443"
|
||||
opts: check
|
||||
|
||||
@@ -105,15 +105,17 @@ server localhost
|
||||
zone example.com
|
||||
update delete *.apps.example.com A
|
||||
update delete openshift.example.com A
|
||||
update delete openshift-master.example.com A
|
||||
update delete openshift-master.example.com A
|
||||
update delete openshift-master-0.example.com A
|
||||
update delete openshift-master-1.example.com A
|
||||
update delete openshift-master-2.example.com A
|
||||
update delete openshift-infra-0.example.com A
|
||||
update delete openshift-infra-1.example.com A
|
||||
update delete openshift-node-0.example.com A
|
||||
update delete openshift-node-1.example.com A
|
||||
update delete openshift-lb.example.com A
|
||||
update delete master.example.com A
|
||||
update delete master.example.com A
|
||||
update delete master0.example.com A
|
||||
update delete master1.example.com A
|
||||
update delete master2.example.com A
|
||||
update delete infra0.example.com A
|
||||
update delete infra1.example.com A
|
||||
update delete infra2.example.com A
|
||||
update delete app0.example.com A
|
||||
update delete app1.example.com A
|
||||
update delete app2.example.com A
|
||||
update delete lb.example.com A
|
||||
show
|
||||
send
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
---
|
||||
##############################################################################
|
||||
##############################################################################
|
||||
####
|
||||
#### oVirt/RHV VM setup section
|
||||
####
|
||||
##############################################################################
|
||||
##############################################################################
|
||||
### Red Hat Virtualization Engine Connection
|
||||
engine_url: "{{ vault_engine_url }}"
|
||||
engine_user: "{{ vault_engine_user }}"
|
||||
engine_password: "{{ vault_engine_password }}"
|
||||
# CA file copied from engine:/etc/pki/ovirt-engine/ca.pem; path is relative to playbook directory
|
||||
engine_cafile: ../ca.pem
|
||||
|
||||
##############################################################################
|
||||
### Red Hat Virtualization VM Image
|
||||
##############################################################################
|
||||
|
||||
## For CentOS 7:
|
||||
#qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2c
|
||||
|
||||
## For RHEL: Find KVM Guest Image in Downloads -> RHEL on https://access.redhat.com/ and use before the link expires:
|
||||
#qcow_url:https://access.cdn.redhat.com//content/origin/files/<omitted>/rhel-server-7.4-x86_64-kvm.qcow2?_auth_=<omitted>
|
||||
## Alternatively, download the above KVM image, and re-host it on a local satellite:
|
||||
qcow_url: http://satellite.example.com/pub/rhel-server-7.4.x86_64-kvm.qcow2
|
||||
|
||||
template_name: rhel741
|
||||
image_path: "{{ ansible_env.HOME }}/Downloads/{{ template_name }}.qcow2"
|
||||
|
||||
|
||||
## Name of cluster to install on
|
||||
rhv_cluster: Default
|
||||
|
||||
## Name of RHV storage domain to create disks
|
||||
rhv_data_storage: vmstore
|
||||
|
||||
##############################################################################
|
||||
### PUBLIC SSH key for access to all nodes.
|
||||
## Use ssh-agent or a passwordless key in ~/.ssh/id_rsa for the PRIVATE key.
|
||||
# Example with vault
|
||||
#root_ssh_key: "{{ vault_root_ssh_key }}"
|
||||
# Example with local id_rsa.pub
|
||||
root_ssh_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
|
||||
|
||||
## Define if using nsupdate to manage DNS entries dynamically
|
||||
## This will allow the output_dns.yaml playbook to create an
|
||||
## nsupdate script
|
||||
nsupdate_server: localhost
|
||||
|
||||
@@ -11,7 +11,7 @@ engine_cafile: ../ca.pem
|
||||
##############################################################################
|
||||
### Red Hat Virtualization VM Image
|
||||
## For CentOS 7:
|
||||
qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
|
||||
qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2c
|
||||
## For RHEL: Find KVM Guest Image in Downloads -> RHEL on https://access.redhat.com/ and use before the link expires:
|
||||
#qcow_url:https://access.cdn.redhat.com//content/origin/files/<omitted>/rhel-server-7.4-x86_64-kvm.qcow2?_auth_=<omitted>
|
||||
## Alternatively, download the above KVM image, and re-host it on a local satellite:
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
ovirt-37-infra.yaml
|
||||
@@ -0,0 +1,34 @@
|
||||
---
|
||||
###
|
||||
### Stub version of oVirt.vm-infra.yaml that uses a different vars file for testing
|
||||
###
|
||||
- name: oVirt 39 infra
|
||||
hosts: localhost
|
||||
connection: local
|
||||
gather_facts: false
|
||||
|
||||
vars_files:
|
||||
- vars/ovirt-39-vars.yaml
|
||||
|
||||
pre_tasks:
|
||||
- name: Log in to oVirt
|
||||
ovirt_auth:
|
||||
url: "{{ engine_url }}"
|
||||
username: "{{ engine_user }}"
|
||||
password: "{{ engine_password }}"
|
||||
ca_file: "{{ engine_cafile | default(omit) }}"
|
||||
insecure: "{{ engine_insecure | default(true) }}"
|
||||
tags:
|
||||
- always
|
||||
|
||||
roles:
|
||||
- oVirt.image-template
|
||||
- oVirt.vm-infra
|
||||
|
||||
post_tasks:
|
||||
- name: Logout from oVirt
|
||||
ovirt_auth:
|
||||
state: absent
|
||||
ovirt_auth: "{{ ovirt_auth }}"
|
||||
tags:
|
||||
- always
|
||||
@@ -5,7 +5,7 @@
|
||||
gather_facts: false
|
||||
|
||||
vars_files:
|
||||
- ../playbooks/vars/ovirt-infra-vars.yaml
|
||||
- vars/ovirt-39-vars.yaml
|
||||
|
||||
pre_tasks:
|
||||
- name: Log in to oVirt
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
#!/bin/bash
|
||||
VARIANT=${1:-39}
|
||||
if [ -f "test/ocp-vars.yaml.$VARIANT" ]
|
||||
then
|
||||
VARS="-e@test/ocp-vars.yaml.$VARIANT"
|
||||
else
|
||||
VARS="-e@test/ocp-vars.yaml"
|
||||
fi
|
||||
|
||||
if [ -f "test/ovirt-${VARIANT}-infra.yaml" ]
|
||||
then
|
||||
INFRA="test/ovirt-${VARIANT}-infra.yaml"
|
||||
else
|
||||
INFRA="playbooks/ovirt-vm-infra.yaml"
|
||||
fi
|
||||
|
||||
ansible-playbook $VARS test/uninstall.yaml
|
||||
ansible-playbook $VARS $INFRA
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "Infrastructure deploy broke"
|
||||
exit
|
||||
fi
|
||||
ansible-playbook $VARS playbooks/output-dns.yaml
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "DNS generation broke"
|
||||
exit
|
||||
fi
|
||||
nsupdate -k /etc/rndc.key inventory.nsupdate
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "DNS update broke"
|
||||
exit
|
||||
fi
|
||||
ansible-playbook -i test/inventory.yaml $VARS ../../../openshift-ansible/playbooks/prerequisites.yml
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "Prerequisites installation broke"
|
||||
exit
|
||||
fi
|
||||
ansible-playbook -i test/inventory.yaml $VARS ../../../openshift-ansible/playbooks/deploy_cluster.yml
|
||||
|
||||
@@ -1,34 +1,40 @@
|
||||
#!/bin/bash
|
||||
VARIANT=${1:-centos}
|
||||
if [ -f "test/ocp-vars.yaml.$VARIANT" ]
|
||||
then
|
||||
VARS="-e@test/ocp-vars.yaml.$VARIANT"
|
||||
else
|
||||
VARS="-e@test/ocp-vars.yaml"
|
||||
fi
|
||||
VARIANT=${1:-39}
|
||||
|
||||
if [ -f "test/ovirt-${VARIANT}-infra.yaml" ]
|
||||
if [ -f "example/ovirt-${VARIANT}-infra.yaml" ]
|
||||
then
|
||||
INFRA="test/ovirt-${VARIANT}-infra.yaml"
|
||||
INFRA="example/ovirt-${VARIANT}-infra.yaml"
|
||||
else
|
||||
INFRA="playbooks/ovirt-vm-infra.yaml"
|
||||
fi
|
||||
|
||||
ansible-playbook $VARS test/uninstall.yaml
|
||||
ansible-playbook $VARS $INFRA
|
||||
ansible-playbook -e@~/vault.yaml example/uninstall.yaml
|
||||
ansible-playbook -e@~/vault.yaml $INFRA
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "Infrastructure deploy broke"
|
||||
exit
|
||||
fi
|
||||
ansible-playbook $VARS playbooks/output-dns.yaml
|
||||
echo "Waiting a minute for VMs to get IPs posted"
|
||||
sleep 60
|
||||
ansible-playbook -i /etc/ansible/hosts -i inventory playbooks/output-dns.yaml
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "DNS generation broke"
|
||||
exit
|
||||
echo "DNS generation broke. Backing off and retrying in two minutes"
|
||||
sleep 120
|
||||
ansible-playbook -i /etc/ansible/hosts -i inventory playbooks/output-dns.yaml
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "Still breaking DNS generation. exiting."
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
nsupdate -k /etc/rndc.key inventory.nsupdate
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "DNS update broke"
|
||||
exit
|
||||
fi
|
||||
ansible-playbook $VARS playbooks/openshift-install.yaml
|
||||
ansible-playbook -e@~/vault.yaml /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "Prerequisites installation broke"
|
||||
exit
|
||||
fi
|
||||
ansible-playbook -e@~/vault.yaml /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml
|
||||
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
---
|
||||
- name: Unregister VMs
|
||||
gather_facts: false
|
||||
gather_facts: true
|
||||
hosts:
|
||||
- tag_openshift_master
|
||||
- tag_openshift_infra
|
||||
- tag_openshift_node
|
||||
- tag_openshift_lb
|
||||
- masters
|
||||
- infras
|
||||
- nodes
|
||||
- lb
|
||||
roles:
|
||||
- rhsm-unregister
|
||||
ignore_errors: yes
|
||||
@@ -16,7 +16,7 @@
|
||||
gather_facts: false
|
||||
|
||||
vars_files:
|
||||
- vars/ovirt-37-vars.yaml
|
||||
- vars/ovirt-39-vars.yaml
|
||||
|
||||
pre_tasks:
|
||||
- name: Log in to oVirt
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
ovirt-37-vars.yaml
|
||||
@@ -0,0 +1,221 @@
|
||||
---
|
||||
###########################
|
||||
# Common
|
||||
###########################
|
||||
compatibility_version: 4.2
|
||||
|
||||
# Data center
|
||||
data_center_name: Default
|
||||
|
||||
##########################
|
||||
# VM infra
|
||||
##########################
|
||||
template_cluster: "{{ rhv_cluster }}"
|
||||
template_memory: 8GiB
|
||||
template_cpu: 1
|
||||
template_disk_storage: "{{ rhv_data_storage }}"
|
||||
template_disk_size: 60GiB
|
||||
template_nics:
|
||||
- name: nic1
|
||||
profile_name: ovirtmgmt
|
||||
interface: virtio
|
||||
|
||||
##########################
|
||||
# Other top scope vars
|
||||
##########################
|
||||
debug_vm_create: true
|
||||
wait_for_ip: true
|
||||
vm_infra_wait_for_ip_retries: 10
|
||||
vm_infra_wait_for_ip_delay: 40
|
||||
|
||||
master_vm:
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
template: "{{ template_name }}"
|
||||
memory: 16GiB
|
||||
cores: 2
|
||||
high_availability: true
|
||||
disks:
|
||||
- size: 15GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: docker_disk
|
||||
interface: virtio
|
||||
- size: 30GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: localvol_disk
|
||||
interface: virtio
|
||||
- size: 25GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: etcd_disk
|
||||
interface: virtio
|
||||
state: running
|
||||
|
||||
node_vm:
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
template: "{{ template_name }}"
|
||||
memory: 8GiB
|
||||
cores: 2
|
||||
disks:
|
||||
- size: 15GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: docker_disk
|
||||
interface: virtio
|
||||
- size: 30GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: localvol_disk
|
||||
interface: virtio
|
||||
state: running
|
||||
|
||||
|
||||
##########################
|
||||
# Cloud Init Script
|
||||
##########################
|
||||
# Use the following if RHEL 7.4 or below VMs are being created on a RHV 4.2 or above engine
|
||||
# - sed -i 's@^# device =.*@device = /dev/virtio-ports/ovirt-guest-agent.0@' /etc/ovirt-guest-agent.conf
|
||||
# - sed -i 's@com.redhat.rhevm.vdsm@ovirt-guest-agent.0@' /etc/udev/rules.d/55-ovirt-guest-agent.rules
|
||||
# - 'udevadm trigger --subsystem-match="virtio-ports"'
|
||||
|
||||
cloud_init_script_node: |
|
||||
runcmd:
|
||||
- mkdir -p '/var/lib/origin/openshift.local.volumes'
|
||||
- /usr/sbin/mkfs.xfs -L localvol /dev/vdc
|
||||
- sleep "$(($RANDOM % 60))"
|
||||
- sync
|
||||
- reboot
|
||||
mounts:
|
||||
- [ '/dev/vdc', '/var/lib/origin/openshift.local.volumes', 'xfs', 'defaults,gquota' ]
|
||||
rh_subscription:
|
||||
username: {{vault_rhsub_user}}
|
||||
password: {{vault_rhsub_password}}
|
||||
add-pool: [{{vault_rhsub_pool}}]
|
||||
server-hostname: {{vault_rhsub_server}}
|
||||
enable-repo: ['rhel-7-server-rpms', 'rhel-7-server-extras-rpms', 'rhel-7-fast-datapath-rpms', 'rhel-7-server-ose-3.9-rpms']
|
||||
disable-repo: []
|
||||
|
||||
cloud_init_script_master: |
|
||||
runcmd:
|
||||
- mkdir -p '/var/lib/origin/openshift.local.volumes'
|
||||
- mkdir -p '/var/lib/etcd'
|
||||
- /usr/sbin/mkfs.xfs -L localvol /dev/vdc
|
||||
- /usr/sbin/mkfs.xfs -L etcd /dev/vdd
|
||||
- sleep "$(($RANDOM % 60))"
|
||||
- sync
|
||||
- reboot
|
||||
mounts:
|
||||
- [ '/dev/vdc', '/var/lib/origin/openshift.local.volumes', 'xfs', 'defaults,gquota' ]
|
||||
- [ '/dev/vdd', '/var/lib/etcd', 'xfs', 'defaults' ]
|
||||
rh_subscription:
|
||||
username: {{vault_rhsub_user}}
|
||||
password: {{vault_rhsub_password}}
|
||||
add-pool: [{{vault_rhsub_pool}}]
|
||||
server-hostname: {{vault_rhsub_server}}
|
||||
enable-repo: ['rhel-7-server-rpms', 'rhel-7-server-extras-rpms', 'rhel-7-fast-datapath-rpms', 'rhel-7-server-ose-3.9-rpms']
|
||||
disable-repo: []
|
||||
|
||||
vms:
|
||||
# Master VMs
|
||||
- name: "master0.{{ public_hosted_zone }}"
|
||||
profile: "{{ master_vm }}"
|
||||
tag: openshift_master
|
||||
cloud_init:
|
||||
host_name: "master0.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_master }}"
|
||||
- name: "master1.{{ public_hosted_zone }}"
|
||||
tag: openshift_master
|
||||
profile: "{{ master_vm }}"
|
||||
cloud_init:
|
||||
host_name: "master1.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_master }}"
|
||||
- name: "master2.{{ public_hosted_zone }}"
|
||||
tag: openshift_master
|
||||
profile: "{{ master_vm }}"
|
||||
cloud_init:
|
||||
host_name: "master2.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_master }}"
|
||||
|
||||
# Infra VMs
|
||||
- name: "infra0.{{ public_hosted_zone }}"
|
||||
tag: openshift_infra
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "infra0.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_node }}"
|
||||
- name: "infra1.{{ public_hosted_zone }}"
|
||||
tag: openshift_infra
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "infra1.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_node }}"
|
||||
- name: "infra2.{{ public_hosted_zone }}"
|
||||
tag: openshift_infra
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "infra2.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_node }}"
|
||||
|
||||
# Node VMs
|
||||
- name: "app0.{{ public_hosted_zone }}"
|
||||
tag: openshift_node
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "app0.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_node }}"
|
||||
- name: "app1.{{ public_hosted_zone }}"
|
||||
tag: openshift_node
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "app1.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_node }}"
|
||||
- name: "app2.{{ public_hosted_zone }}"
|
||||
tag: openshift_node
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "app2.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_node }}"
|
||||
|
||||
# Load balancer
|
||||
- name: "lb.{{ public_hosted_zone }}"
|
||||
tag: openshift_lb
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "lb.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_node }}"
|
||||
|
||||
affinity_groups:
|
||||
- name: masters_ag
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
vm_enforcing: false
|
||||
vm_rule: negative
|
||||
vms:
|
||||
- "master0.{{ public_hosted_zone }}"
|
||||
- "master1.{{ public_hosted_zone }}"
|
||||
- "master2.{{ public_hosted_zone }}"
|
||||
wait: true
|
||||
- name: infra_ag
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
vm_enforcing: false
|
||||
vm_rule: negative
|
||||
vms:
|
||||
- "infra0.{{ public_hosted_zone }}"
|
||||
- "infra1.{{ public_hosted_zone }}"
|
||||
- "infra2.{{ public_hosted_zone }}"
|
||||
wait: true
|
||||
|
||||
- name: app_ag
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
vm_enforcing: false
|
||||
vm_rule: negative
|
||||
vms:
|
||||
- "app0.{{ public_hosted_zone }}"
|
||||
- "app1.{{ public_hosted_zone }}"
|
||||
- "app2.{{ public_hosted_zone }}"
|
||||
...
|
||||
@@ -16,6 +16,7 @@ template_memory: 8GiB
|
||||
template_cpu: 2
|
||||
template_disk_storage: "{{ rhv_data_storage }}"
|
||||
template_disk_size: 60GiB
|
||||
template_operating_system: rhel_7x64
|
||||
template_nics:
|
||||
- name: nic1
|
||||
profile_name: ovirtmgmt
|
||||
@@ -24,8 +25,7 @@ template_nics:
|
||||
##########################
|
||||
# Other top scope vars
|
||||
##########################
|
||||
# Centos does not have guest agent...
|
||||
#wait_for_ip: false
|
||||
wait_for_ip: true
|
||||
# See if anything goes wrong...
|
||||
debug_vm_create: true
|
||||
|
||||
@@ -34,6 +34,7 @@ master_vm:
|
||||
template: "{{ template_name }}"
|
||||
memory: 16GiB
|
||||
cores: 2
|
||||
state: running
|
||||
high_availability: true
|
||||
disks:
|
||||
- size: 100GiB
|
||||
@@ -46,6 +47,7 @@ node_vm:
|
||||
template: "{{ template_name }}"
|
||||
memory: 8GiB
|
||||
cores: 2
|
||||
state: running
|
||||
disks:
|
||||
- size: 100GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
@@ -64,7 +66,8 @@ install_guest_agent_script: |
|
||||
enabled: true
|
||||
gpgcheck: false
|
||||
packages:
|
||||
- ovirt-guest-agent
|
||||
- ovirt-guest-agent-common
|
||||
- python-ipaddress
|
||||
runcmd:
|
||||
- systemctl enable ovirt-guest-agent
|
||||
- systemctl start ovirt-guest-agent
|
||||
@@ -72,8 +75,8 @@ install_guest_agent_script: |
|
||||
vms:
|
||||
# Master VMs
|
||||
- name: "openshift-master-0.{{ public_hosted_zone }}"
|
||||
profile: "{{ master_vm }}"
|
||||
tag: openshift_master
|
||||
profile: "{{ master_vm }}"
|
||||
cloud_init:
|
||||
host_name: "openshift-master-0.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
|
||||
@@ -1,108 +0,0 @@
|
||||
---
|
||||
##############################################################################
|
||||
### Red Hat Virtualization Engine Connection
|
||||
#engine_url: https://engine.example.com/ovirt-engine/api
|
||||
engine_url:
|
||||
engine_user: admin@internal
|
||||
engine_password:
|
||||
# CA file copied or downloaded from engine; path is relative to playbook directory.
|
||||
# Download URL https://<engine>/ovirt-engine/services/pki-resource?resource=ca-certificate&format=X509-PEM-CA
|
||||
# If you are running on engine, you could use the below.
|
||||
# engine_cafile: /etc/pki/ovirt-engine/ca.pem
|
||||
engine_cafile: ../ca.pem
|
||||
|
||||
##############################################################################
|
||||
### Red Hat Virtualization VM Image
|
||||
|
||||
## Overrides flow in this order:
|
||||
# template_name > image_path > qcow_url
|
||||
# in other words, if a {{ template_name }} VM template already exists in RHV, no image will be checked.
|
||||
# If the image is already downloaded to {{ image_path }}, it will not be re-downloaded.
|
||||
|
||||
## Name of template to create openshift nodes from
|
||||
template_name: rhel7
|
||||
|
||||
## If you prefer to provide a pre-downloaded VM image instead, use an absolute path, e.g.:
|
||||
#image_path: /home/myuser/Downloads/{{ template_name }}.qcow2
|
||||
|
||||
## For CentOS 7:
|
||||
#qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2c
|
||||
|
||||
## For RHEL: Find KVM Guest Image in Downloads -> RHEL on https://access.redhat.com/ and use before the link expires:
|
||||
#qcow_url:https://access.cdn.redhat.com//content/origin/files/<omitted>/rhel-server-7.4-x86_64-kvm.qcow2?_auth_=<omitted>
|
||||
## Alternatively, download the above KVM image, and re-host it on a local satellite:
|
||||
#qcow_url: https://satellite.example.com/pub/rhel-server-7.4-x86_64-kvm.qcow2
|
||||
qcow_url:
|
||||
|
||||
## Name of cluster to install on
|
||||
rhv_cluster: Default
|
||||
|
||||
## Name of RHV storage domain to create disks
|
||||
rhv_data_storage: vmstore
|
||||
|
||||
##############################################################################
|
||||
### Red Hat Content Subscriptions
|
||||
## For subscriptions to Satellite:
|
||||
rhsm_satellite: satellite.example.com
|
||||
rhsm_activation_key: vm-key
|
||||
rhsm_org_id: Default_Organization
|
||||
rhsm_pool: none
|
||||
rhsm_katello_url: http://satellite.example.com/pub/katello-ca-consumer-latest.noarch.rpm
|
||||
|
||||
## For subscriptions to Red Hat's CDN
|
||||
## Userid/Password could be moved to a vault file and encrypted for safety, see the following link for details:
|
||||
## http://docs.ansible.com/ansible/playbooks_vault.html
|
||||
#rhsm_pool: OpenShift Enterprise, Premium*
|
||||
#rhsm_user:
|
||||
#rhsm_password:
|
||||
|
||||
##############################################################################
|
||||
### PUBLIC SSH key for access to all nodes.
|
||||
## Use ssh-agent or a passwordless key in ~/.ssh/id_rsa for the PRIVATE key.
|
||||
root_ssh_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
|
||||
|
||||
##############################################################################
|
||||
### Openshift variables
|
||||
## Choices of deployment type: openshift-enterprise, origin
|
||||
deployment_type: openshift-enterprise
|
||||
openshift_vers: v3_7
|
||||
containerized: false
|
||||
console_port: 8443
|
||||
|
||||
##############################################################################
|
||||
### DNS entries
|
||||
## Wildcard *.{{app_dns_prefix}}.{{public_hosted_zone}} must point to IP of LB
|
||||
public_hosted_zone: example.com
|
||||
app_dns_prefix: apps
|
||||
load_balancer_hostname: openshift-lb.{{public_hosted_zone}}
|
||||
openshift_master_cluster_hostname: "{{load_balancer_hostname}}"
|
||||
openshift_master_cluster_public_hostname: openshift.{{public_hosted_zone}}
|
||||
|
||||
##############################################################################
|
||||
### OpenShift Identity Provider
|
||||
# htpasswd shown here, other options documented at
|
||||
# https://docs.openshift.com/container-platform/3.5/install_config/configuring_authentication.html
|
||||
openshift_master_identity_providers:
|
||||
- name: htpasswd_auth
|
||||
login: true
|
||||
challenge: true
|
||||
kind: HTPasswdPasswordIdentityProvider
|
||||
filename: /etc/origin/master/htpasswd
|
||||
# Defining htpasswd users
|
||||
#openshift_master_htpasswd_users:
|
||||
# - user1: <pre-hashed password>
|
||||
# - user2: <pre-hashed password>
|
||||
# Use 'htpasswd -n <user>' to generate password hash. (htpasswd from httpd-tools RPM)
|
||||
# Example with admin:changeme
|
||||
openshift_master_htpasswd_users: {'admin': '$apr1$zAhyA9Ko$rBxBOwAwwtRuuaw8OtCwH0'}
|
||||
# or
|
||||
#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
|
||||
|
||||
##############################################################################
|
||||
### Registry storage
|
||||
## NFS
|
||||
openshift_hosted_registry_storage_kind: nfs
|
||||
openshift_hosted_registry_selector: role=infra
|
||||
openshift_hosted_registry_storage_host: 192.168.155.10
|
||||
openshift_hosted_registry_storage_nfs_directory: /var/lib/exports
|
||||
openshift_hosted_registry_storage_volume_name: registryvol
|
||||
221
reference-architecture/rhv-ansible/ovirt-infra-vars.yaml
Normal file
@@ -0,0 +1,221 @@
|
||||
---
|
||||
###########################
|
||||
# Common
|
||||
###########################
|
||||
compatibility_version: 4.2
|
||||
|
||||
# Data center
|
||||
data_center_name: Default
|
||||
|
||||
##########################
|
||||
# VM infra
|
||||
##########################
|
||||
template_cluster: "{{ rhv_cluster }}"
|
||||
template_name: centos7
|
||||
template_memory: 8GiB
|
||||
template_cpu: 1
|
||||
template_disk_storage: "{{ rhv_data_storage }}"
|
||||
template_disk_size: 60GiB
|
||||
template_nics:
|
||||
- name: nic1
|
||||
profile_name: ovirtmgmt
|
||||
interface: virtio
|
||||
|
||||
##########################
|
||||
# Other top scope vars
|
||||
##########################
|
||||
debug_vm_create: true
|
||||
wait_for_ip: true
|
||||
vm_infra_wait_for_ip_retries: 10
|
||||
vm_infra_wait_for_ip_delay: 40
|
||||
|
||||
master_vm:
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
template: "{{ template_name }}"
|
||||
memory: 16GiB
|
||||
cores: 2
|
||||
high_availability: true
|
||||
disks:
|
||||
- size: 15GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: docker_disk
|
||||
interface: virtio
|
||||
- size: 30GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: localvol_disk
|
||||
interface: virtio
|
||||
- size: 25GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: etcd_disk
|
||||
interface: virtio
|
||||
state: running
|
||||
|
||||
node_vm:
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
template: "{{ template_name }}"
|
||||
memory: 8GiB
|
||||
cores: 2
|
||||
disks:
|
||||
- size: 15GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: docker_disk
|
||||
interface: virtio
|
||||
- size: 30GiB
|
||||
storage_domain: "{{ rhv_data_storage }}"
|
||||
name: localvol_disk
|
||||
interface: virtio
|
||||
state: running
|
||||
|
||||
|
||||
##########################
|
||||
# Cloud Init Script
|
||||
##########################
|
||||
# Use the following if RHEL 7.4 or below VMs are being created on a RHV 4.2 or above engine
|
||||
# - sed -i 's@^# device =.*@device = /dev/virtio-ports/ovirt-guest-agent.0@' /etc/ovirt-guest-agent.conf
|
||||
# - sed -i 's@com.redhat.rhevm.vdsm@ovirt-guest-agent.0@' /etc/udev/rules.d/55-ovirt-guest-agent.rules
|
||||
# - 'udevadm trigger --subsystem-match="virtio-ports"'
|
||||
|
||||
cloud_init_script_master: |
|
||||
runcmd:
|
||||
- mkdir -p '/var/lib/origin/openshift.local.volumes'
|
||||
- mkdir -p '/var/lib/etcd'
|
||||
- /usr/sbin/mkfs.xfs -L localvol /dev/vdc
|
||||
- /usr/sbin/mkfs.xfs -L etcd /dev/vdd
|
||||
- sleep "$(($RANDOM % 60))"
|
||||
- sync
|
||||
- reboot
|
||||
mounts:
|
||||
- [ '/dev/vdc', '/var/lib/origin/openshift.local.volumes', 'xfs', 'defaults,gquota' ]
|
||||
- [ '/dev/vdd', '/var/lib/etcd', 'xfs', 'defaults' ]
|
||||
rh_subscription:
|
||||
username: {{vault_rhsub_user}}
|
||||
password: {{vault_rhsub_password}}
|
||||
add-pool: [{{vault_rhsub_pool}}]
|
||||
server-hostname: {{vault_rhsub_server}}
|
||||
enable-repo: ['rhel-7-server-rpms', 'rhel-7-server-extras-rpms', 'rhel-7-fast-datapath-rpms', 'rhel-7-server-ose-3.9-rpms']
|
||||
disable-repo: []
|
||||
cloud_init_script: |
|
||||
runcmd:
|
||||
- mkdir -p '/var/lib/origin/openshift.local.volumes'
|
||||
- /usr/sbin/mkfs.xfs -L localvol /dev/vdc
|
||||
- sleep "$(($RANDOM % 60))"
|
||||
- sync
|
||||
- reboot
|
||||
mounts:
|
||||
- [ '/dev/vdc', '/var/lib/origin/openshift.local.volumes', 'xfs', 'defaults,gquota' ]
|
||||
rh_subscription:
|
||||
username: {{vault_rhsub_user}}
|
||||
password: {{vault_rhsub_password}}
|
||||
add-pool: [{{vault_rhsub_pool}}]
|
||||
server-hostname: {{vault_rhsub_server}}
|
||||
enable-repo: ['rhel-7-server-rpms', 'rhel-7-server-extras-rpms', 'rhel-7-fast-datapath-rpms', 'rhel-7-server-ose-3.9-rpms']
|
||||
disable-repo: []
|
||||
|
||||
vms:
|
||||
# Master VMs
|
||||
- name: "master0.{{ public_hosted_zone }}"
|
||||
profile: "{{ master_vm }}"
|
||||
tag: openshift_master
|
||||
cloud_init:
|
||||
host_name: "master0.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_master }}"
|
||||
- name: "master1.{{ public_hosted_zone }}"
|
||||
tag: openshift_master
|
||||
profile: "{{ master_vm }}"
|
||||
cloud_init:
|
||||
host_name: "master1.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_master }}"
|
||||
- name: "master2.{{ public_hosted_zone }}"
|
||||
tag: openshift_master
|
||||
profile: "{{ master_vm }}"
|
||||
cloud_init:
|
||||
host_name: "master2.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script_master }}"
|
||||
|
||||
# Infra VMs
|
||||
- name: "infra0.{{ public_hosted_zone }}"
|
||||
tag: openshift_infra
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "infra0.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script }}"
|
||||
- name: "infra1.{{ public_hosted_zone }}"
|
||||
tag: openshift_infra
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "infra1.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script }}"
|
||||
- name: "infra2.{{ public_hosted_zone }}"
|
||||
tag: openshift_infra
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "infra2.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script }}"
|
||||
|
||||
# Node VMs
|
||||
- name: "app0.{{ public_hosted_zone }}"
|
||||
tag: openshift_node
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "app0.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script }}"
|
||||
- name: "app1.{{ public_hosted_zone }}"
|
||||
tag: openshift_node
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "app1.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script }}"
|
||||
- name: "app2.{{ public_hosted_zone }}"
|
||||
tag: openshift_node
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "app2.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script }}"
|
||||
|
||||
# Load balancer
|
||||
- name: "lb.{{ public_hosted_zone }}"
|
||||
tag: openshift_lb
|
||||
profile: "{{ node_vm }}"
|
||||
cloud_init:
|
||||
host_name: "lb.{{ public_hosted_zone }}"
|
||||
authorized_ssh_keys: "{{ root_ssh_key }}"
|
||||
custom_script: "{{ cloud_init_script }}"
|
||||
|
||||
affinity_groups:
|
||||
- name: masters_ag
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
vm_enforcing: false
|
||||
vm_rule: negative
|
||||
vms:
|
||||
- "master0.{{ public_hosted_zone }}"
|
||||
- "master1.{{ public_hosted_zone }}"
|
||||
- "master2.{{ public_hosted_zone }}"
|
||||
wait: true
|
||||
- name: infra_ag
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
vm_enforcing: false
|
||||
vm_rule: negative
|
||||
vms:
|
||||
- "infra0.{{ public_hosted_zone }}"
|
||||
- "infra1.{{ public_hosted_zone }}"
|
||||
- "infra2.{{ public_hosted_zone }}"
|
||||
wait: true
|
||||
|
||||
- name: app_ag
|
||||
cluster: "{{ rhv_cluster }}"
|
||||
vm_enforcing: false
|
||||
vm_rule: negative
|
||||
vms:
|
||||
- "app0.{{ public_hosted_zone }}"
|
||||
- "app1.{{ public_hosted_zone }}"
|
||||
- "app2.{{ public_hosted_zone }}"
|
||||
...
|
||||
@@ -1 +0,0 @@
|
||||
/usr/share/ansible/openshift-ansible/library/rpm_q.py
|
||||
@@ -1,113 +0,0 @@
---
- hosts: localhost
  gather_facts: yes
  roles:
    - instance-groups

- hosts: nodes
  gather_facts: yes
  roles:
    - rhsm-timeout
    - role: atomic-update
      when: openshift.common.is_atomic

- hosts: nodes
  gather_facts: no
  serial: 1
  roles:
    - role: rhsm-subscription
      when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
            ansible_distribution == "RedHat" and ( rhsm_user is defined or rhsm_activation_key is defined)

- hosts: nodes
  gather_facts: no
  roles:
    - role: rhsm-repos
      when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
            ansible_distribution == "RedHat" and ( rhsm_user is defined or rhsm_activation_key is defined)
    - role: docker-storage-setup
      docker_dev: '/dev/vdb'
    - prerequisites

- hosts: masters
  gather_facts: yes
  roles:
    - master-prerequisites

- hosts: schedulable_nodes
  roles:
    - role: openshift-volume-quota
      local_volumes_device: '/dev/vdc'
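    # Assumption: the node VM profile attaches two extra virtio disks, /dev/vdb
    # (consumed by docker-storage-setup) and /dev/vdc (used by
    # openshift-volume-quota above); adjust the device names if yours differ.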

- name: call openshift includes for installer
  include: /usr/share/ansible/openshift-ansible/playbooks/byo/config.yml
  vars:
    debug_level: 2
    openshift_debug_level: "{{ debug_level }}"
    wildcard_zone: "{{app_dns_prefix}}.{{public_hosted_zone}}"
    osm_cluster_network_cidr: 172.16.0.0/16
    osm_use_cockpit: false
    osm_default_node_selector: "role=app"
    osm_default_subdomain: "{{ wildcard_zone }}"
    openshift_deployment_type: "{{ deployment_type }}"
    openshift_hosted_registry_replicas: 1
    openshift_hosted_registry_storage_access_modes: ['ReadWriteMany']
    openshift_hosted_registry_storage_volume_size: 30Gi
    openshift_hosted_router_replicas: 2
    openshift_master_access_token_max_seconds: 2419200
    openshift_master_api_port: "{{ console_port }}"
    openshift_master_cluster_method: native
    openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
    openshift_master_cluster_public_hostname: "{{ load_balancer_hostname }}"
    openshift_master_console_port: "{{ console_port }}"
    openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
    openshift_master_default_subdomain: "{{osm_default_subdomain}}"
    openshift_master_logging_public_url: "https://kibana.{{ osm_default_subdomain }}"
    openshift_master_metrics_public_url: "https://metrics.{{ osm_default_subdomain }}/hawkular/metrics"
    openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
    openshift_node_kubelet_args:
      node-labels:
        - "role={{ openshift_node_labels.role }}"
    openshift_registry_selector: "role=infra"
    openshift_router_selector: "role=infra"
    openshift_metrics_hawkular_nodeselector: "role=infra"
    openshift_metrics_cassandra_nodeselector: "role=infra"
    openshift_metrics_heapster_nodeselector: "role=infra"
    openshift_logging_es_nodeselector: "role=infra"
    openshift_logging_kibana_nodeselector: "role=infra"
    openshift_logging_curator_nodeselector: "role=infra"
    #template_service_broker_selector: {'role': 'infra'}
    #openshift_template_service_broker_namespaces: ['openshift', 'testproject']
    openshift_enable_service_catalog: false
    # Load balancer config
    openshift_loadbalancer_additional_frontends:
      - name: apps-http
        option: tcplog
        binds:
          - "*:80"
        default_backend: apps-http
      - name: apps-https
        option: tcplog
        binds:
          - "*:443"
        default_backend: apps-http
    openshift_loadbalancer_additional_backends:
      - name: apps-http
        balance: source
        servers:
          - name: infra0
            address: "{{ hostvars[groups['tag_openshift_infra'].0]['ansible_host'] }}:80"
            opts: check
          - name: infra1
            address: "{{ hostvars[groups['tag_openshift_infra'].1]['ansible_host'] }}:80"
            opts: check
      - name: apps-https
        balance: source
        servers:
          - name: infra0
            address: "{{ hostvars[groups['tag_openshift_infra'].0]['ansible_host'] }}:443"
            opts: check
          - name: infra1
            address: "{{ hostvars[groups['tag_openshift_infra'].1]['ansible_host'] }}:443"
            opts: check
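    # The additional HAProxy frontends/backends above publish ports 80 and 443
    # on the lb VM and pass application traffic through to the OpenShift
    # routers running on the first two infra nodes.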
...
@@ -1,37 +0,0 @@
---
- hosts: localhost
  gather_facts: yes
  become: no
  pre_tasks:
    - name: set fact
      set_fact:
        openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_public_hostname }}"
    - name: set fact
      set_fact:
        openshift_master_cluster_hostname: "{{ openshift_master_cluster_hostname }}"
    - name: set fact
      set_fact:
        console_port: "{{ console_port}}"
    - name: set fact
      set_fact:
        wildcard_zone: "{{app_dns_prefix}}.{{public_hosted_zone}}"
  roles:
    # Group systems
    - instance-groups
    - validate-public

- hosts: masters
  gather_facts: no
  roles:
    - validate-masters

- hosts: masters
  gather_facts: yes
  roles:
    - validate-etcd

- hosts: single_master
  gather_facts: yes
  become: yes
  roles:
    - validate-app
@@ -39,6 +39,7 @@
    - "{{ groups['tag_openshift_infra'] }}"
    - "{{ groups['tag_openshift_node'] }}"
    - "{{ groups['tag_openshift_lb'] }}"
  when: nsupdate_server is defined
- name: Append show/send lines
  lineinfile:
    path: ../inventory.nsupdate
@@ -50,16 +51,19 @@
    path: ../inventory.nsupdate
    line: "zone {{public_hosted_zone}}"
    state: present
  when: nsupdate_server is defined
- name: Create NSUPDATE wildcard A entry
  lineinfile:
    path: ../inventory.nsupdate
    line: "update add *.{{app_dns_prefix}}.{{public_hosted_zone}} 86400 A {{lb_ip}}"
    line: "update add *.{{ openshift_master_default_subdomain }} 86400 A {{lb_ip}}"
    state: present
  when: nsupdate_server is defined
- name: Create NSUPDATE public hostname entry
  lineinfile:
    path: ../inventory.nsupdate
    line: "update add {{openshift_master_cluster_public_hostname}} 86400 A {{lb_ip}}"
    state: present
  when: nsupdate_server is defined
- name: Create NSUPDATE host A entries
  lineinfile:
    path: ../inventory.nsupdate
@@ -70,6 +74,7 @@
    - "{{ groups['tag_openshift_infra'] }}"
    - "{{ groups['tag_openshift_node'] }}"
    - "{{ groups['tag_openshift_lb'] }}"
  when: nsupdate_server is defined
- name: Append show/send lines
  lineinfile:
    path: ../inventory.nsupdate

@@ -1,30 +0,0 @@
---
- name: oVirt infra
  hosts: localhost
  connection: local
  gather_facts: false

  vars_files:
    - vars/ovirt-infra-vars.yaml

  pre_tasks:
    - name: Login to oVirt
      ovirt_auth:
        url: "{{ engine_url }}"
        username: "{{ engine_user }}"
        password: "{{ engine_password }}"
        ca_file: "{{ engine_cafile | default(omit) }}"
        insecure: "{{ engine_insecure | default(true) }}"
      tags:
        - always
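      # ovirt_auth stores the session token in the ovirt_auth fact, which the
      # ovirt-infra role and the logout task below reuse.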

  roles:
    - ovirt-infra

  post_tasks:
    - name: Logout from oVirt
      ovirt_auth:
        state: absent
        ovirt_auth: "{{ ovirt_auth }}"
      tags:
        - always
@@ -5,7 +5,7 @@
  gather_facts: false

  vars_files:
    - vars/ovirt-infra-vars.yaml
    - ../ovirt-infra-vars.yaml

  pre_tasks:
    - name: Log in to oVirt

@@ -14,7 +14,7 @@
  gather_facts: false

  vars_files:
    - vars/ovirt-infra-vars.yaml
    - ../ovirt-infra-vars.yaml

  pre_tasks:
    - name: Log in to oVirt
