From 3d1a77147bfc15bfefec6929e698bae462375a2d Mon Sep 17 00:00:00 2001
From: vr4manta
Date: Wed, 11 Oct 2023 12:54:05 -0400
Subject: [PATCH] Initial changes for removal of terraform

---
 images/installer/Dockerfile.upi.ci | 6 +
 upi/vsphere/README.md | 79 ++++-
 upi/vsphere/lb/haproxy.erb.tmpl | 57 +++
 upi/vsphere/upi-destroy.ps1 | 73 ++++
 upi/vsphere/upi-functions.ps1 | 201 +++++++++++
 upi/vsphere/upi.ps1 | 540 +++++++++++++++++++++++++++++
 upi/vsphere/variables.ps1.example | 165 +++++++++
 7 files changed, 1119 insertions(+), 2 deletions(-)
 create mode 100644 upi/vsphere/lb/haproxy.erb.tmpl
 create mode 100644 upi/vsphere/upi-destroy.ps1
 create mode 100644 upi/vsphere/upi-functions.ps1
 create mode 100644 upi/vsphere/upi.ps1
 create mode 100644 upi/vsphere/variables.ps1.example

diff --git a/images/installer/Dockerfile.upi.ci b/images/installer/Dockerfile.upi.ci
index 4771d0c0af..e7203aa154 100644
--- a/images/installer/Dockerfile.upi.ci
+++ b/images/installer/Dockerfile.upi.ci
@@ -37,6 +37,7 @@ RUN yum update -y && \
     unzip \
     openssh-clients \
     openssl \
+    powershell \
     python3-pyOpenSSL \
     python2-pyyaml \
     python3-pyyaml \
@@ -91,6 +92,11 @@ RUN mkdir /output && HOME=/output && \
     ibmcloud version && \
     ibmcloud plugin list

+# Install the VMware PowerCLI and EPS modules for PowerShell. Create settings directory /output/.local/share/VMware/PowerCLI
+RUN pwsh -Command 'Install-Module VMware.PowerCLI -Force -Scope AllUsers' && \
+    pwsh -Command 'Install-Module -Name EPS -RequiredVersion 1.0 -Force -Scope AllUsers' && \
+    mkdir -p /output/.local/share/VMware/PowerCLI && chmod -R 777 /output/.local
+
 RUN chown 1000:1000 /output && chmod -R g=u "/output/.bluemix/"
 USER 1000:1000
 ENV PATH /bin
diff --git a/upi/vsphere/README.md b/upi/vsphere/README.md
index 6e9e160a22..362e4e9627 100644
--- a/upi/vsphere/README.md
+++ b/upi/vsphere/README.md
@@ -1,9 +1,84 @@
-# Pre-Requisites
+This project shows two ways to install a UPI cluster. We will discuss how to install using either of these two techniques:
- Terraform
- PowerShell

# Table of Contents
- [PowerShell](#powershell)
  - [Pre-requisites](#pre-requisites)
  - [PowerShell Setup](#powershell-setup)
    - [VMware.PowerCLI](#vmwarepowercli)
    - [EPS](#eps)
    - [Generating CLI Credentials](#generating-cli-credentials)
  - [Script Configuration](#script-configuration)
  - [Build a Cluster](#build-a-cluster)
- [Terraform](#terraform)
  - [Pre-Requisites](#pre-requisites-1)
  - [Build a Cluster](#build-a-cluster-1)

# PowerShell
This section describes the process of generating the vSphere VMs using PowerShell and the scripts supplied in this module.

## Pre-requisites
* PowerShell
* PowerShell VMware.PowerCLI Module
* PowerShell EPS Module

## PowerShell Setup

PowerShell needs a couple of modules installed in order for these scripts to work. The modules to install are VMware.PowerCLI and EPS.

### VMware.PowerCLI

To install the VMware.PowerCLI module, you can run the following command:

```shell
pwsh -Command 'Install-Module VMware.PowerCLI -Force -Scope CurrentUser'
```

### EPS

To install the EPS module, you can run the following command:

```shell
pwsh -Command 'Install-Module -Name EPS -RequiredVersion 1.0 -Force -Scope CurrentUser'
```

### Generating CLI Credentials

The PowerShell scripts require a credentials file containing the credentials used to create the vSphere resources. These do not have to be the credentials used by the OpenShift cluster via install-config.yaml, but they must have permission to create folders, tags, templates, and VMs.
To generate the credentials file, run:

```shell
pwsh -command "\$User='';\$Password=ConvertTo-SecureString -String '' -AsPlainText -Force;\$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList \$User, \$Password;\$Credential | Export-Clixml secrets/vcenter-creds.xml"
```

Be sure to set the empty `User` and `Password` strings to your vCenter username and password. The output is written to `secrets/vcenter-creds.xml`; make sure the `secrets` directory exists before running the command above.

## Script Configuration

The PowerShell scripts provided by this project show how to handle several aspects of creating a UPI cluster environment, and they are configurable to do as much or as little as you need. For the CI build process, we handle the install-config.yaml configuration, template upload, and installation monitoring ourselves; the scripts can handle all of that as well if configured appropriately.

### Behavioral Configurations

| Property            | Description |
|---------------------|-------------|
| createInstallConfig | Enables the script to create the install config based on the configuration in variables.ps1. |
| downloadInstaller   | Enables the script to download the installer to be used. If the installer is not downloaded by the script, it must be placed in the same directory as the script. |
| uploadTemplateOva   | Enables the script to upload the OVA template used for all VMs being created. |
| generateIgnitions   | Enables the script to generate the ignition configs. This is normally used when an install-config.yaml is provided to the script, but the script still needs to generate the ignition configs for the VMs. |
| waitForComplete     | Has the script step through the process of waiting for the installation to complete. Most of this functionality is provided by `openshift-install wait-for`. The script will check for the API becoming ready, bootstrap completing, CSRs needing approval, and finally all cluster operators finishing installation. |
| delayVMStart        | Has the script delay the start of the VMs after their creation. |

## Build a Cluster

1. Copy `variables.ps1.example` to `variables.ps1` and edit it for your environment, including the behavioral configurations above.
2. Generate the vCenter credentials file as described in [Generating CLI Credentials](#generating-cli-credentials).
3. Run `pwsh ./upi.ps1` to create the cluster. To tear it down later, run `pwsh ./upi-destroy.ps1`.

# Terraform
This section will walk you through generating a cluster using Terraform.

## Pre-Requisites

* terraform
* jq

## Build a Cluster

1. Create an install-config.yaml. The machine CIDR for the dev cluster is 139.178.89.192/26.
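The next file in this patch, `lb/haproxy.erb.tmpl`, is the EPS template used to build the haproxy configuration for the load balancer VM. As a point of reference, here is a minimal sketch (with illustrative IP values) of how such a template is rendered from PowerShell with the EPS module; the real rendering happens in `New-LoadBalancerIgnition` in `upi-functions.ps1`:

```powershell
# Render lb/haproxy.erb.tmpl with sample values (illustrative only).
# The scripts pass $lb_ip_address, $api, and $ingress from variables.ps1.
Import-Module EPS

$Binding = @{
    'lb_ip_address' = '192.168.14.10'                      # address haproxy binds to
    'api'           = @('192.168.14.11', '192.168.14.20')  # bootstrap + control plane IPs
    'ingress'       = @('192.168.14.30', '192.168.14.31')  # compute IPs (control plane if no workers)
}

$haproxyConfig = Invoke-EpsTemplate -Path 'lb/haproxy.erb.tmpl' -Binding $Binding
Write-Output $haproxyConfig
```

In the scripts, this rendered configuration is base64-encoded and embedded into the load balancer VM's ignition config as `/etc/haproxy/haproxy.conf`.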
diff --git a/upi/vsphere/lb/haproxy.erb.tmpl b/upi/vsphere/lb/haproxy.erb.tmpl new file mode 100644 index 0000000000..ac4806db66 --- /dev/null +++ b/upi/vsphere/lb/haproxy.erb.tmpl @@ -0,0 +1,57 @@ +defaults + maxconn 20000 + mode tcp + log /var/run/haproxy/haproxy-log.sock local0 + option dontlognull + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 86400s + timeout server 86400s + timeout tunnel 86400s + +frontend api-server + bind <%= $lb_ip_address %>:6443 + default_backend api-server + +frontend machine-config-server + bind <%= $lb_ip_address %>:22623 + default_backend machine-config-server + +frontend router-http + bind <%= $lb_ip_address %>:80 + default_backend router-http + +frontend router-https + bind <%= $lb_ip_address %>:443 + default_backend router-https + +backend api-server + option httpchk GET /readyz HTTP/1.0 + option log-health-checks + balance roundrobin +<% foreach ($addr in $api) { -%> + server <%= $addr %> <%= $addr %>:6443 weight 1 verify none check check-ssl inter 1s fall 2 rise 3 +<% } -%> + +backend machine-config-server + balance roundrobin +<% foreach ($addr in $api) { -%> + server <%= $addr %> <%= $addr %>:22623 check +<% } -%> + +backend router-http + balance source + mode tcp +<% foreach ($addr in $ingress) { -%> + server <%= $addr %> <%= $addr %>:80 check +<% } -%> + +backend router-https + balance source + mode tcp +<% foreach ($addr in $ingress) { -%> + server <%= $addr %> <%= $addr %>:443 check +<% } -%> + diff --git a/upi/vsphere/upi-destroy.ps1 b/upi/vsphere/upi-destroy.ps1 new file mode 100644 index 0000000000..6e3586478c --- /dev/null +++ b/upi/vsphere/upi-destroy.ps1 @@ -0,0 +1,73 @@ +#!/usr/bin/pwsh + +. .\variables.ps1 + +$ErrorActionPreference = "Stop" + +# since we do not have ca for vsphere certs, we'll just set insecure +Set-PowerCLIConfiguration -InvalidCertificateAction:Ignore -Confirm:$false | Out-Null +$Env:GOVC_INSECURE = 1 + +# Connect to vCenter +Connect-VIServer -Server $vcenter -Credential (Import-Clixml $vcentercredpath) + +# Convert the installer metadata to a powershell object +$metadata = Get-Content -Path ./metadata.json | ConvertFrom-Json + +# Get tag for all resources we created +$tagCategory = Get-TagCategory -Name "openshift-$($metadata.infraID)" +$tag = Get-Tag -Category $tagCategory -Name "$($metadata.infraID)" + +# Clean up all VMs +$vms = Get-VM -Tag $tag +foreach ($vm in $vms) { + if ($vm.PowerState -eq "PoweredOn") { + Write-Output "Stopping VM $vm" + Stop-VM -VM $vm -confirm:$false > $null + } + Write-Output "Removing VM $vm" + Remove-VM -VM $vm -DeletePermanently -confirm:$false +} + +# Clean up all templates +$templates = Get-TagAssignment -Tag $tag -Entity (Get-Template) +foreach ($template in $templates) { + Write-Output "Removing template $($template.Entity)" + Remove-Template -Template $($template.Entity) -DeletePermanently -confirm:$false +} + +# Clean up storage policy +$storagePolicies = Get-SpbmStoragePolicy -Tag $tag + +foreach ($policy in $storagePolicies) { + + $clusterInventory = @() + $splitResults = @($policy.Name -split "openshift-storage-policy-") + + if ($splitResults.Count -eq 2) { + $clusterId = $splitResults[1] + if ($clusterId -ne "") { + Write-Host $clusterId + $clusterInventory = @(Get-Inventory -Name "$($clusterId)*" -ErrorAction Continue) + + if ($clusterInventory.Count -eq 0) { + Write-Host "Removing policy: $($policy.Name)" + $policy | Remove-SpbmStoragePolicy -Confirm:$false + } + else { + Write-Host "not deleting: $($clusterInventory)" + } + } 
+ } +} + +# Clean up all folders +$folders = Get-TagAssignment -Tag $tag -Entity (Get-Folder) +foreach ($folder in $folders) { + Write-Output "Removing folder $($folder.Entity)" + Remove-Folder -Folder $($folder.Entity) -DeletePermanently -confirm:$false +} + +# Clean up tags +Remove-Tag -Tag $tag -confirm:$false +Remove-TagCategory -Category $tagCategory -confirm:$false \ No newline at end of file diff --git a/upi/vsphere/upi-functions.ps1 b/upi/vsphere/upi-functions.ps1 new file mode 100644 index 0000000000..18f17594db --- /dev/null +++ b/upi/vsphere/upi-functions.ps1 @@ -0,0 +1,201 @@ +#!/usr/bin/pwsh + +function New-OpenShiftVM { + param( + [Parameter(Mandatory=$true)] + $Datastore, + $FailureDomain, + [Parameter(Mandatory=$true)] + [string]$IgnitionData, + [switch]$LinkedClone, + $Location, + $MemoryMB, + [Parameter(Mandatory=$true)] + [string]$Name, + $Network, + $Networking, + $NumCpu, + $ReferenceSnapshot, + $ResourcePool, + [Parameter(Mandatory=$true)] + $Tag, + [Parameter(Mandatory=$true)] + $Template, + $VMHost + ) + + #Write-Output $IgnitionData + + # Create arg collection for New-VM + $args = $PSBoundParameters + $args.Remove('Template') > $null + $args.Remove('IgnitionData') > $null + $args.Remove('Tag') > $null + $args.Remove('Networking') > $null + $args.Remove('Network') > $null + $args.Remove('MemoryMB') > $null + $args.Remove('NumCpu') > $null + foreach ($key in $args.Keys){ + if ($NULL -eq $($args.Item($key)) -or $($args.Item($key)) -eq "") { + $args.Remove($key) > $null + } + } + + # Clone the virtual machine from the imported template + # $vm = New-VM -VM $Template -Name $Name -Datastore $Datastore -ResourcePool $ResourcePool #-Location $Folder #-LinkedClone -ReferenceSnapshot $Snapshot + $vm = New-VM -VM $Template @args + + # Assign tag so we can later clean up + New-TagAssignment -Entity $vm -Tag $Tag > $null + + # Update VM specs. New-VM does not honor the passed in parameters due to Template being used. 
+ if ($null -ne $MemoryMB -And $null -ne $NumCpu) + { + Set-VM -VM $vm -MemoryMB $MemoryMB -NumCpu $NumCpu -CoresPerSocket 4 -Confirm:$false > $null + } + Get-HardDisk -VM $vm | Select-Object -First 1 | Set-HardDisk -CapacityGB 120 -Confirm:$false > $null + + # Configure Network (Assuming template networking may not be correct if shared across clusters) + $pg = Get-VirtualPortgroup -Name $Network -VMHost $(Get-VMHost -VM $vm) 2> $null + $vm | Get-NetworkAdapter | Set-NetworkAdapter -Portgroup $pg -confirm:$false > $null + + # Assign advanced settings + New-AdvancedSetting -Entity $vm -name "stealclock.enable" -value "TRUE" -confirm:$false -Force > $null + New-AdvancedSetting -Entity $vm -name "guestinfo.ignition.config.data.encoding" -value "base64" -confirm:$false -Force > $null + New-AdvancedSetting -Entity $vm -name "guestinfo.ignition.config.data" -value $IgnitionData -confirm:$false -Force > $null + New-AdvancedSetting -Entity $vm -name "guestinfo.hostname" -value $Name -Confirm:$false -Force > $null + + # Create ip kargs + # "guestinfo.afterburn.initrd.network-kargs" = "ip=${var.ipaddress}::${cidrhost(var.machine_cidr, 1)}:${cidrnetmask(var.machine_cidr)}:${var.vmname}:ens192:none:${join(":", var.dns_addresses)}" + # Example: ip=::::::: + if ($null -ne $Networking) + { + $kargs = "ip=$($Networking.ipAddress)::$($Networking.gateway):$($Networking.netmask):$($Networking.hostname):ens192:none:$($Networking.dns)" + New-AdvancedSetting -Entity $vm -name "guestinfo.afterburn.initrd.network-kargs" -value $kargs -Confirm:$false -Force > $null + } + + return $vm +} + +function New-VMConfigs { + $virtualMachines = @" +{ + "virtualmachines": {} +} +"@ | ConvertFrom-Json -Depth 2 + $fds = ConvertFrom-Json $failure_domains + + # Generate Bootstrap + $vm = createNode -FailureDomain $fds[0] -Type "bootstrap" -VCenter $vcenter -IPAddress $bootstrap_ip_address + add-member -Name "bootstrap" -value $vm -MemberType NoteProperty -InputObject $virtualMachines.virtualmachines + + # Generate Control Plane + for (($i =0); $i -lt $control_plane_count; $i++) { + $vm = createNode -FailureDomain $fds[$i % $fds.Length] -Type "master" -VCenter $vcenter -IPAddress $control_plane_ip_addresses[$i] + add-member -Name $control_plane_hostnames[$i] -value $vm -MemberType NoteProperty -InputObject $virtualMachines.virtualmachines + } + + # Generate Compute + for (($i =0); $i -lt $compute_count; $i++) { + $vm = createNode -FailureDomain $fds[$i % $fds.Length] -Type "worker" -VCenter $vcenter -IPAddress $compute_ip_addresses[$i] + add-member -Name $compute_hostnames[$i] -value $vm -MemberType NoteProperty -InputObject $virtualMachines.virtualmachines + } + + return $virtualMachines | ConvertTo-Json +} + +function createNode { + param ( + $FailureDomain, + $IPAddress, + $Type, + $VCenter + ) + + $vmConfig = @" +{ + "server": "$($VCenter)", + "datacenter": "$($FailureDomain. 
datacenter)", + "cluster": "$($FailureDomain.cluster)", + "network": "$($FailureDomain.network)", + "datastore": "$($FailureDomain.datastore)", + "type": "$($Type)", + "ip": "$($IPAddress)" +} +"@ + return ConvertFrom-Json -InputObject $vmConfig +} + +function New-LoadBalancerIgnition { + param ( + [string]$sshKey + ) + + $haproxyService = (Get-Content -Path ./lb/haproxy.service -Raw) | ConvertTo-Json + + $api = $control_plane_ip_addresses + $bootstrap_ip_address + if ($compute_count -gt 0) + { + $ingress = $compute_ip_addresses + } else { + $ingress = $control_plane_ip_addresses + } + + $Binding = @{ 'lb_ip_address' = $lb_ip_address; 'api' = $api; 'ingress' = $ingress } + $haproxyConfig = Invoke-EpsTemplate -Path "lb/haproxy.erb.tmpl" -Binding $Binding + + $haproxyConfig = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($haproxyConfig)) + + $lbIgnition = @" +{ + "ignition": { "version": "3.0.0" }, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "$($sshKey)" + ] + } + ] + }, + "storage": { + "files": [{ + "path": "/etc/haproxy/haproxy.conf", + "mode": 420, + "contents": { "source": "data:text/plain;charset=utf-8;base64,$($haproxyConfig)" } + }] + }, + "systemd": { + "units": [{ + "name": "haproxy.service", + "enabled": true, + "contents": $($haproxyService) + }] + } +} +"@ + return $lbIgnition +} + +function New-VMNetworkConfig { + param( + $DNS, + $Gateway, + $Hostname, + $IPAddress, + $Netmask + ) + $network = $null + + $network = @" +{ + "ipAddress": "$($IPAddress)", + "netmask": "$($Netmask)", + "dns": "$($DNS)", + "hostname": "$($Hostname)", + "gateway": "$($Gateway)" +} +"@ + return ConvertFrom-Json -InputObject $network +} \ No newline at end of file diff --git a/upi/vsphere/upi.ps1 b/upi/vsphere/upi.ps1 new file mode 100644 index 0000000000..287c112fc7 --- /dev/null +++ b/upi/vsphere/upi.ps1 @@ -0,0 +1,540 @@ +#!/usr/bin/pwsh + +. .\variables.ps1 +. .\upi-functions.ps1 + +$ErrorActionPreference = "Stop" + +# since we do not have ca for vsphere certs, we'll just set insecure +Set-PowerCLIConfiguration -InvalidCertificateAction:Ignore -Confirm:$false | Out-Null +$Env:GOVC_INSECURE = 1 + +# Connect to vCenter +Connect-VIServer -Server $vcenter -Credential (Import-Clixml $vcentercredpath) + +if ($downloadInstaller) { + Write-Output "Downloading the most recent $($version) installer" + + $releaseApiUri = "https://api.github.com/repos/openshift/okd/releases" + $progressPreference = 'silentlyContinue' + $webrequest = Invoke-WebRequest -uri $releaseApiUri + $progressPreference = 'Continue' + $releases = ConvertFrom-Json $webrequest.Content -AsHashtable + $publishedDate = (Get-Date).AddDays(-365) + $currentRelease = $null + + foreach($r in $releases) { + if($r['name'] -like "*$($version)*") { + if ($publishedDate -lt $r['published_at'] ) { + $publishedDate = $r['published_at'] + $currentRelease = $r + } + } + } + + foreach($asset in $currentRelease['assets']) { + if($asset['name'] -like "openshift-install-linux*") { + $installerUrl = $asset['browser_download_url'] + } + } + + # If openshift-install doesn't exist on the path, download it and extract + if (-Not (Test-Path -Path "openshift-install")) { + + $progressPreference = 'silentlyContinue' + Invoke-WebRequest -uri $installerUrl -OutFile "installer.tar.gz" + tar -xvf "installer.tar.gz" + $progressPreference = 'Continue' + } +} + +if ($uploadTemplateOva) { + Write-Output "Downloading RHCOS OVA" + + # If the OVA doesn't exist on the path, determine the url from openshift-install and download it. 
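+    # For reference, the OVA location this block resolves can also be inspected manually
+    # (hypothetical one-liner, assuming jq is available):
+    #   ./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.vmware.formats.ova.disk.location'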
+ if (-Not (Test-Path -Path "template-$($Version).ova")) { + Start-Process -Wait -Path ./openshift-install -ArgumentList @("coreos", "print-stream-json") -RedirectStandardOutput coreos.json + + $coreosData = Get-Content -Path ./coreos.json | ConvertFrom-Json -AsHashtable + $ovaUri = $coreosData.architectures.x86_64.artifacts.vmware.formats.ova.disk.location + $progressPreference = 'silentlyContinue' + Invoke-WebRequest -uri $ovaUri -OutFile "template-$($Version).ova" + $progressPreference = 'Continue' + } +} + +$sshKey = [string](Get-Content -Path $sshkeypath -Raw:$true) -Replace '\n','' + +if ($createInstallConfig) { + # Without having to add additional powershell modules yaml is difficult to deal + # with. There is a supplied install-config.json which is converted to a powershell + # object + $config = ConvertFrom-Json -InputObject $installconfig + + # Set the install-config.json from upi-variables + $config.metadata.name = $clustername + $config.baseDomain = $basedomain + $config.sshKey = $sshKey + $config.platform.vsphere.vcenter = $vcenter + $config.platform.vsphere.username = $username + $config.platform.vsphere.password = $password + $config.platform.vsphere.datacenter = $datacenter + $config.platform.vsphere.defaultDatastore = $datastore + $config.platform.vsphere.cluster = $cluster + $config.platform.vsphere.network = $portgroup + # $config.platform.vsphere.apiVIP = $apivip + # $config.platform.vsphere.ingressVIP = $ingressvip + + $config.pullSecret = $pullsecret -replace "`n", "" -replace " ", "" + + # Write out the install-config.yaml (really json) + $config | ConvertTo-Json -Depth 8 | Out-File -FilePath install-config.yaml -Force:$true +} + +if ($generateIgnitions) { + # openshift-install create manifests + start-process -Wait -FilePath ./openshift-install -argumentlist @("create", "manifests") + + # Remove master machines and the worker machinesets + rm -f openshift/99_openshift-cluster-api_master-machines-*.yaml openshift/99_openshift-cluster-api_worker-machineset-*.yaml + + # openshift-install create ignition-configs + start-process -Wait -FilePath ./openshift-install -argumentlist @("create", "ignition-configs") +} + +# Check failure domains. If not set, create a default single failure domain from settings +if ($null -eq $failure_domains) { + Write-Output "Generating Failure Domain..." + $failure_domains = @" +[ + { + "datacenter": "$($datacenter)", + "cluster": "$($cluster)", + "datastore": "$($datastore)", + "network": "$($portgroup)" + } +] +"@ +} +$fds = $failure_domains | ConvertFrom-Json + +# Convert the installer metadata to a powershell object +$metadata = Get-Content -Path ./metadata.json | ConvertFrom-Json + +# Since we are using MachineSets for the workers make sure we set the +# template name to what is expected to be generated by the installer. +if ($null -eq $vm_template) { + $vm_template = "$( $metadata.infraID )-rhcos" +} + +# Create tag for all resources we create +$tagCategory = Get-TagCategory -Name "openshift-$($metadata.infraID)" -ErrorAction continue 2>$null +if (-Not $?) { + Write-Output "Creating Tag Category openshift-$($metadata.infraID)" + $tagCategory = New-TagCategory -Name "openshift-$($metadata.infraID)" -EntityType "urn:vim25:VirtualMachine","urn:vim25:ResourcePool","urn:vim25:Folder","urn:vim25:Datastore","urn:vim25:StoragePod" +} +$tag = Get-Tag -Category $tagCategory -Name "$($metadata.infraID)" -ErrorAction continue 2>$null +if (-Not $?) 
{ + Write-Output "Creating Tag $($metadata.infraID)" + $tag = New-Tag -Category $tagCategory -Name "$($metadata.infraID)" +} + +# Check each failure domain for ova template +foreach ($fd in $fds) +{ + $datastoreInfo = Get-Datastore -Name $fd.datastore -Location $fd.datacenter + + # If the folder already exists + Write-Output "Checking for folder in failure domain $($fd.datacenter)/$($fd.cluster)" + $folder = Get-Folder -Name $metadata.infraID -Location $fd.datacenter -ErrorAction continue 2>$null + + # Otherwise create the folder within the datacenter as defined in the upi-variables + if (-Not $?) { + Write-Output "Creating folder $($metadata.infraID) in datacenter $($fd.datacenter)" + (get-view (Get-Datacenter -Name $fd.datacenter).ExtensionData.vmfolder).CreateFolder($metadata.infraID) + $folder = Get-Folder -Name $metadata.infraID -Location $fd.datacenter + New-TagAssignment -Entity $folder -Tag $tag > $null + } + + # If the rhcos virtual machine already exists + Write-Output "Checking for vm template in failure domain $($fd.datacenter)/$($fd.cluster)" + $template = Get-VM -Name $vm_template -Location $fd.datacenter -ErrorAction continue + + # Otherwise import the ova to a random host on the vSphere cluster + if (-Not$?) + { + $vmhost = Get-Random -InputObject (Get-VMHost -Location (Get-Cluster $fd.cluster)) + $ovfConfig = Get-OvfConfiguration -Ovf "template-$( $Version ).ova" + $ovfConfig.NetworkMapping.VM_Network.Value = $portgroup + $template = Import-Vapp -Source "template-$( $Version ).ova" -Name $vm_template -OvfConfiguration $ovfConfig -VMHost $vmhost -Datastore $datastoreInfo -InventoryLocation $folder -Force:$true + + $templateVIObj = Get-View -VIObject $template.Name + # Need to look into upgrading hardware. For me it keeps throwing exception. 
+ <# try { + $templateVIObj.UpgradeVM($hardwareVersion) + } + catch { + Write-Output "Something happened setting VM hardware version" + Write-Output $_ + } #> + + New-TagAssignment -Entity $template -Tag $tag + Set-VM -VM $template -MemoryGB 16 -NumCpu 4 -CoresPerSocket 4 -Confirm:$false > $null + Get-HardDisk -VM $template | Select-Object -First 1 | Set-HardDisk -CapacityGB 120 -Confirm:$false > $null + New-AdvancedSetting -Entity $template -name "disk.EnableUUID" -value 'TRUE' -confirm:$false -Force > $null + New-AdvancedSetting -Entity $template -name "guestinfo.ignition.config.data.encoding" -value "base64" -confirm:$false -Force > $null + #$snapshot = New-Snapshot -VM $template -Name "linked-clone" -Description "linked-clone" -Memory -Quiesce + } +} + +Write-Output "Creating LB" + +# Data needed for LB VM creation +$rp = Get-Cluster -Name $fds[0].cluster -Server $vcenter +$datastoreInfo = Get-Datastore -Name $fds[0].datastore -Server $vcenter -Location $fds[0].datacenter +$folder = Get-Folder -Name $metadata.infraID -Location $fds[0].datacenter +$template = Get-VM -Name $vm_template -Location $fds[0].datacenter + +# Create LB for Cluster +$ignition = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes((New-LoadBalancerIgnition $sshKey))) +$network = New-VMNetworkConfig -Hostname "$($metadata.infraID)-lb" -IPAddress $lb_ip_address -Netmask $netmask -Gateway $gateway -DNS $dns -Network $failure_domains[0].network +$vm = New-OpenShiftVM -IgnitionData $ignition -Name "$($metadata.infraID)-lb" -Template $template -ResourcePool $rp -Datastore $datastoreInfo -Location $folder -Tag $tag -Networking $network -Network $($fds[0].network) -MemoryMB 8192 -NumCpu 4 +$vm | Start-VM + +# Take the $virtualmachines defined in upi-variables and convert to a powershell object +if ($null -eq $virtualmachines) +{ + $virtualmachines = New-VMConfigs +} +$vmHash = ConvertFrom-Json -InputObject $virtualmachines -AsHashtable + +Write-Progress -id 222 -Activity "Creating virtual machines" -PercentComplete 0 + +$vmStep = (100 / $vmHash.virtualmachines.Count) +$vmCount = 1 +foreach ($key in $vmHash.virtualmachines.Keys) { + $node = $vmHash.virtualmachines[$key] + + $name = "$($metadata.infraID)-$($key)" + Write-Output "Creating $($name)" + + $rp = Get-Cluster -Name $node.cluster -Server $node.server + ##$datastore = Get-Datastore -Name $node.datastore -Server $node.server + $datastoreInfo = Get-Datastore -Name $node.datastore -Location $node.datacenter + + # Pull network config for each node + if ($node.type -eq "master") { + $numCPU = $control_plane_num_cpus + $memory = $control_plane_memory + } elseif ($node.type -eq "worker") { + $numCPU = $compute_num_cpus + $memory = $compute_memory + } else { + # should only be bootstrap + $numCPU = $control_plane_num_cpus + $memory = $control_plane_memory + } + $ip = $node.ip + $network = New-VMNetworkConfig -Hostname $name -IPAddress $ip -Netmask $netmask -Gateway $gateway -DNS $dns + + # Get the content of the ignition file per machine type (bootstrap, master, worker) + $bytes = Get-Content -Path "./$($node.type).ign" -AsByteStream + $ignition = [Convert]::ToBase64String($bytes) + + # Get correct template / folder + $folder = Get-Folder -Name $metadata.infraID -Location $node.datacenter + $template = Get-VM -Name $vm_template -Location $($node.datacenter) + + # Clone the virtual machine from the imported template + #$vm = New-OpenShiftVM -Template $template -Name $name -ResourcePool $rp -Datastore $datastoreInfo -Location $folder -LinkedClone 
-ReferenceSnapshot $snapshot -IgnitionData $ignition -Tag $tag -Networking $network -NumCPU $numCPU -MemoryMB $memory + $vm = New-OpenShiftVM -Template $template -Name $name -ResourcePool $rp -Datastore $datastoreInfo -Location $folder -IgnitionData $ignition -Tag $tag -Networking $network -Network $node.network -NumCPU $numCPU -MemoryMB $memory + + # Assign tag so we can later clean up + # New-TagAssignment -Entity $vm -Tag $tag + # New-AdvancedSetting -Entity $vm -name "guestinfo.ignition.config.data" -value $ignition -confirm:$false -Force > $null + # New-AdvancedSetting -Entity $vm -name "guestinfo.hostname" -value $name -Confirm:$false -Force > $null + + if ($node.type -eq "master" -And $delayVMStart) { + # To give bootstrap some time to start, lets wait 2 minutes + Start-ThreadJob -ThrottleLimit 5 -InputObject $vm { + Start-Sleep -Seconds 90 + $input | Start-VM + } + } elseif ($node.type -eq "worker" -And $delayVMStart) { + # Workers are not needed right away, gotta wait till masters + # have started machine-server. wait 7 minutes to start. + Start-ThreadJob -ThrottleLimit 5 -InputObject $vm { + Start-Sleep -Seconds 600 + $input | Start-VM + } + } + else { + $vm | Start-VM + } + Write-Progress -id 222 -Activity "Creating virtual machines" -PercentComplete ($vmStep * $vmCount) + $vmCount++ +} +Write-Progress -id 222 -Activity "Completed virtual machines" -PercentComplete 100 -Completed + +## This is nice to have to clear screen when doing things manually. Maybe i'll +# make this configurable. +# Clear-Host + +# Instead of restarting openshift-install to wait for bootstrap, monitor +# the bootstrap configmap in the kube-system namespace + +# Extract the Client Certificate Data from auth/kubeconfig +$match = Select-String "client-certificate-data: (.*)" -Path ./auth/kubeconfig +[Byte[]]$bytes = [Convert]::FromBase64String($match.Matches.Groups[1].Value) +$clientCertData = [System.Text.Encoding]::ASCII.GetString($bytes) + +# Extract the Client Key Data from auth/kubeconfig +$match = Select-String "client-key-data: (.*)" -Path ./auth/kubeconfig +$bytes = [Convert]::FromBase64String($match.Matches.Groups[1].Value) +$clientKeyData = [System.Text.Encoding]::ASCII.GetString($bytes) + +# Create a X509Certificate2 object for Invoke-WebRequest +$cert = [System.Security.Cryptography.X509Certificates.X509Certificate2]::CreateFromPem($clientCertData, $clientKeyData) + +# Extract the kubernetes endpoint uri +$match = Select-String "server: (.*)" -Path ./auth/kubeconfig +$kubeurl = $match.Matches.Groups[1].Value + +if ($waitForComplete) +{ + $apiTimeout = (20*60) + $apiCount = 1 + $apiSleep = 30 + Write-Progress -Id 444 -Status "1% Complete" -Activity "API" -PercentComplete 1 + :api while ($true) { + Start-Sleep -Seconds $apiSleep + try { + $webrequest = Invoke-WebRequest -Uri "$($kubeurl)/version" -SkipCertificateCheck + $version = (ConvertFrom-Json $webrequest.Content).gitVersion + + if ($version -ne "" ) { + Write-Debug "API Version: $($version)" + Write-Progress -Id 444 -Status "Completed" -Activity "API" -PercentComplete 100 + break api + } + } + catch {} + + $percentage = ((($apiCount*$apiSleep)/$apiTimeout)*100) + if ($percentage -le 100) { + Write-Progress -Id 444 -Status "$percentage% Complete" -Activity "API" -PercentComplete $percentage + } + $apiCount++ + } + + $bootstrapTimeout = (30*60) + $bootstrapCount = 1 + $bootstrapSleep = 30 + Write-Progress -Id 333 -Status "1% Complete" -Activity "Bootstrap" -PercentComplete 1 + :bootstrap while ($true) + { + Start-Sleep -Seconds $bootstrapSleep 
+ + try + { + $webrequest = Invoke-WebRequest -Certificate $cert -Uri "$( $kubeurl )/api/v1/namespaces/kube-system/configmaps/bootstrap" -SkipCertificateCheck + + $bootstrapStatus = (ConvertFrom-Json $webrequest.Content).data.status + + if ($bootstrapStatus -eq "complete") + { + Get-VM "$( $metadata.infraID )-bootstrap" | Stop-VM -Confirm:$false | Remove-VM -DeletePermanently -Confirm:$false + Write-Progress -Id 333 -Status "Completed" -Activity "Bootstrap" -PercentComplete 100 + break bootstrap + } + } + catch + { + } + + $percentage = ((($bootstrapCount*$bootstrapSleep)/$bootstrapTimeout)*100) + if ($percentage -le 100) + { + Write-Progress -Id 333 -Status "$percentage% Complete" -Activity "Bootstrap" -PercentComplete $percentage + } + else + { + Write-Output "Warning: Bootstrap taking longer than usual." -NoNewLine -ForegroundColor Yellow + } + + $bootstrapCount++ + } + + # Now that bootstrap is complete, we should be getting worker node CSRs that need to be approved before being + # able to finish installation. Lets monitor for CSRs, approve them and verify the number of worker nodes have + # now appeared and are Ready before moving on. + + # [ngirard@fedora ibm7-installs]$ oc get csr | grep Pending + #csr-2hgbd 2m52s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending + #csr-lwmgf 2m19s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending + #csr-scvk6 2m30s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending + + # apis/certificates.k8s.io/v1/certificatesigningrequests + $csrTimeout = (600/5) + $csrCount = 1 + $csrSleep = 5 + Write-Progress -Id 222 -Status "1% Complete" -Activity "Worker Ready" -PercentComplete 0 + :csrLoop while ($true) + { + Start-Sleep -Seconds $csrSleep + + try + { + $webrequest = Invoke-WebRequest -Certificate $cert -Uri "$( $kubeurl )/apis/certificates.k8s.io/v1/certificatesigningrequests" -SkipCertificateCheck + + $csrs = (ConvertFrom-Json $webrequest.Content).items + + foreach ($csr in $csrs) + { + # Check if no status (Pending) and if its a type we are looking for (kubernetes.io/kubelet-serving) (kubernetes.io/kube-apiserver-client-kubelet) + $bootstrapper = "system:serviceaccount:openshift-machine-config-operator:node-bootstrapper" + $nodeUser = "system:node:" + + $csrUser = $csr.spec.username + if ($csr.status.conditions -eq $null -And ($csrUser -eq $bootstrapper -Or $csrUser.Contains($nodeUser))) + { + $conditions = New-Object System.Collections.ArrayList + $condition = @{ + type = "Approved" + status = "True" + reason = "PowershellApproved" + message = "This CSR was approved by script in PowerShell." + } + $conditions.Add($condition) > $null + $csr.status | add-member -Name "conditions" -value $conditions -MemberType NoteProperty + Write-Output "Accepting CSR: $( $csr.metadata.name )" + $csrResponse = Invoke-RestMethod -Method "Put" -Certificate $cert -Uri "$( $kubeurl )/apis/certificates.k8s.io/v1/certificatesigningrequests/$( $csr.metadata.name )/approval" -SkipCertificateCheck -Body (ConvertTo-Json $csr -Depth 6) + } + } + } + catch + { + #Write-Output $_ + } + + # Check number of worker nodes with NotReady/Ready. NotReady will be 1 pt where Ready will be 2. 
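+        # Scoring note: each worker contributes 1 point while NotReady and 2 points once Ready,
+        # so the loop below is complete when the total reaches 2 * $compute_count.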
+ $currentComputePoints = 0 + + try + { + $webrequest = Invoke-WebRequest -Certificate $cert -Uri "$( $kubeurl )/api/v1/nodes" -SkipCertificateCheck + + $nodes = (ConvertFrom-Json $webrequest.Content).items + + foreach ($node in $nodes) + { + if ($node.metadata.labels.psobject.properties.name -Contains "node-role.kubernetes.io/worker") + { + #Write-Output "Checking node $($node.metadata.name)" + foreach ($condition in $node.status.conditions) + { + if ($condition.type -eq "Ready") + { + #Write-Output "Is node ready? $($condition.status)" + if ($condition.status -eq "True") + { + $currentComputePoints = $currentComputePoints + 2 + } + else + { + $currentComputePoints++ + } + } + } + } + } + } + catch + { + #Write-Output $_ + } + + $maxComputePoints = $compute_count * 2 + $percentage = ((($currentComputePoints)/$maxComputePoints)*100) + if ($percentage -eq 100) + { + Write-Progress -Id 222 -Status "Completed" -Activity "Worker Ready" -PercentComplete 100 + break csrLoop + } + elseif ($percentage -le 100) + { + Write-Progress -Id 222 -Status "$percentage% Complete" -Activity "Worker Ready" -PercentComplete $percentage + } + + if ($csrCount -ge $csrTimeout) + { + Write-Output "Warning: Bootstrap taking longer than usual." -NoNewLine -ForegroundColor Yellow + break csrLoop + } + + $csrCount++ + } + + $progressMsg = "" + Write-Progress -Id 111 -Status "1% Complete" -Activity "Install" -PercentComplete 1 + :installcomplete while ($true) + { + Start-Sleep -Seconds 30 + try + { + $webrequest = Invoke-WebRequest -Certificate $cert -Uri "$( $kubeurl )/apis/config.openshift.io/v1/clusterversions" -SkipCertificateCheck + + $clusterversions = ConvertFrom-Json $webrequest.Content -AsHashtable + + # just like the installer check the status conditions of the clusterversions config + foreach ($condition in $clusterversions['items'][0]['status']['conditions']) + { + switch ($condition['type']) + { + "Progressing" { + if ($condition['status'] -eq "True") + { + + $matchper = ($condition['message'] | Select-String "^Working.*\(([0-9]{1,3})\%.*\)") + $matchmsg = ($condition['message'] | Select-String -AllMatches -Pattern "^(Working.*)\:.*") + + # During install, the status of CVO will / may go degraded due to operators going + # degraded from taking a while to install. It seems this is the new norm as control + # plane takes a while to roll out and certain operators go degraded until the control + # plane is stable. + if ($matchmsg.Matches.Groups -ne $null) + { + $progressMsg = $matchmsg.Matches.Groups[1].Value + $progressPercent = $matchper.Matches.Groups[1].Value + + Write-Progress -Id 111 -Status "$progressPercent% Complete - $( $progressMsg )" -Activity "Install" -PercentComplete $progressPercent + } + continue + } + } + "Available" { + if ($condition['status'] -eq "True") + { + Write-Progress -Id 111 -Activity "Install" -Status "Completed" -PercentComplete 100 + break installcomplete + } + continue + } + Default { + continue + } + } + } + } + catch + { + Write-Output "Unable to check operators" + Write-Output $_ + } + } +} + +Get-Job | Remove-Job + + +Write-Output "Install Complete!" 
\ No newline at end of file diff --git a/upi/vsphere/variables.ps1.example b/upi/vsphere/variables.ps1.example new file mode 100644 index 0000000000..178a1ae147 --- /dev/null +++ b/upi/vsphere/variables.ps1.example @@ -0,0 +1,165 @@ +# Modify these variables below for your environment +# Option for script to create install-config and modify for upi install +$createInstallConfig=$false +$downloadInstaller=$false +$uploadTemplateOva=$false +$generateIgnitions=$false +$waitForComplete=$false +$delayVMStart=$false + +# OKD variables +# OKD version to be installed +$version = "4.9" +$clustername = "openshift" +$basedomain = "vmc.devcluster.example.com" +$sshkeypath = "/home/bubba/.ssh/id_rsa.pub" +# trying to make this as simple as possible +# will reuse IPIs haproxy, keepalived +# then we can simply use DHCP. +$apivip = "192.168.1.10" +$ingressvip = "192.168.1.11" + + +# vCenter variables +$vcenter = "vcs8e-vc.ocp2.dev.cluster.com" +$username = "" +$password = '' +$portgroup = "ocp-ci-seg-1" +$datastore = "workload_share_vcs8eworkload_lrFsW" +$datacenter = "IBMCloud" +$cluster = "vcs-8e-workload" +$vcentercredpath = "secrets/vcenter-creds.xml" + +$pullsecret = @" +{"auths":{"fake":{"auth":"aWQ6cGFzcwo="}}} +"@ + +$dns = "8.8.8.8" +$gateway = "192.168.14.1" +$netmask = "255.255.255.0" + +$lb_ip_address = "192.168.14.10" +$bootstrap_ip_address = "192.168.14.11" + +# The IP addresses to assign to the control plane VMs. The length of this list +# must match the value of control_plane_count. +$control_plane_memory = 16384 +$control_plane_num_cpus = 4 +$control_plane_count = 3 +$control_plane_ip_addresses = "192.168.14.20", "192.168.14.21", "192.168.14.22" +$control_plane_hostnames = "control-plane-0", "control-plane-1", "control-plane-2" + +# The IP addresses to assign to the compute VMs. The length of this list must +# match the value of compute_count. +$compute_memory = 8192 +$compute_num_cpus = 4 +$compute_count = 3 +$compute_ip_addresses = "192.168.14.30", "192.168.14.31", "192.168.14.32" +$compute_hostnames = "compute-0", "compute-1", "compute-2" + +# If you do not need the script upload a template, you will need to specify the template here. +# You can also set this if you want to change the default name of template when its uploaded. +$vm_template = "" + +$failure_domains = @" +[ + { + // Name of the vSphere data center. + "datacenter": "datacenter-2", + // Name of the vSphere cluster. + "cluster": "vcs-mdcnc-workload-4", + // Name of the vSphere data store to use for the VMs. + "datastore": "mdcnc-ds-4", + // Name of the vSphere network to use for the VMs. 
+ "network": "ocp-ci-seg-14" + } +] +"@ + +$virtualmachines =@" +{ + "virtualmachines": { + "bootstrap": { + "server": "$($vcenter)", + "datacenter": "$($datacenter)", + "cluster": "$($cluster)", + "network": "$($portgroup)", + "datastore": "$($datastore)", + "type": "bootstrap" + }, + "master-0": { + "server": "$($vcenter)", + "datacenter": "$($datacenter)", + "cluster": "$($cluster)", + "network": "$($portgroup)", + "datastore": "$($datastore)", + "type": "master" + }, + "master-1": { + "server": "$($vcenter)", + "datacenter": "$($datacenter)", + "cluster": "$($cluster)", + "network": "$($portgroup)", + "datastore": "$($datastore)", + "type": "master" + }, + "master-2": { + "type": "master", + "server": "$($vcenter)", + "datacenter": "$($datacenter)", + "cluster": "$($cluster)", + "network": "$($portgroup)", + "datastore": "$($datastore)", + }, + "worker-0": { + "type": "worker", + "server": "$($vcenter)", + "datacenter": "$($datacenter)", + "cluster": "$($cluster)", + "network": "$($portgroup)", + "datastore": "$($datastore)", + }, + "worker-1": { + "type": "worker", + "server": "$($vcenter)", + "datacenter": "$($datacenter)", + "cluster": "$($cluster)", + "network": "$($portgroup)", + "datastore": "$($datastore)", + }, + "worker-2": { + "type": "worker", + "server": "$($vcenter)", + "datacenter": "$($datacenter)", + "cluster": "$($cluster)", + "network": "$($portgroup)", + "datastore": "$($datastore)", + } + } +} +"@ + +$installconfig = @" +{ + "apiVersion": "v1", + "baseDomain": "domain", + "metadata": { + "name": "cluster" + }, + "platform": { + "vsphere": { + "vcenter": "vcsa", + "username": "username", + "password": "password", + "datacenter": "dc1", + "defaultDatastore": "datastore", + "cluster": "cluster", + "network": "network", +# "apiVIP": "ipaddr", +# "ingressVIP": "ipaddr" + } + }, + "pullSecret": "", + "sshKey": "" +} +"@