From 18a5bf73b5d69cbadb4f0dc57b1c05b9800ba519 Mon Sep 17 00:00:00 2001 From: Cody Hoag Date: Thu, 30 Jan 2020 14:13:19 -0500 Subject: [PATCH] Installing cluster on Azure UPI --- _topic_map.yml | 2 + .../install_config/installation-types.adoc | 2 +- .../installing-azure-user-infra.adoc | 126 ++++++++ modules/cli-installing-cli.adoc | 1 + modules/cli-logging-in-kubeadmin.adoc | 1 + modules/cluster-entitlements.adoc | 1 + modules/installation-approve-csrs.adoc | 1 + modules/installation-arm-bootstrap.adoc | 257 ++++++++++++++++ modules/installation-arm-control-plane.adoc | 289 ++++++++++++++++++ modules/installation-arm-dns.adoc | 252 +++++++++++++++ modules/installation-arm-image-storage.adoc | 55 ++++ modules/installation-arm-vnet.adoc | 125 ++++++++ modules/installation-arm-worker.adoc | 243 +++++++++++++++ .../installation-azure-create-dns-zones.adoc | 45 +++ ...tion-azure-create-ingress-dns-records.adoc | 70 +++++ ...re-create-resource-group-and-identity.adoc | 49 +++ .../installation-azure-increasing-limits.adoc | 1 + modules/installation-azure-limits.adoc | 12 +- .../installation-azure-network-config.adoc | 1 + modules/installation-azure-permissions.adoc | 3 +- modules/installation-azure-regions.adoc | 1 + .../installation-azure-service-principal.adoc | 1 + ...tallation-azure-user-infra-completing.adoc | 32 ++ ...tion-azure-user-infra-deploying-rhcos.adoc | 42 +++ ...tion-azure-user-infra-uploading-rhcos.adoc | 81 +++++ ...n-azure-user-infra-wait-for-bootstrap.adoc | 50 +++ modules/installation-configure-proxy.adoc | 1 + ...installation-creating-azure-bootstrap.adoc | 52 ++++ ...allation-creating-azure-control-plane.adoc | 55 ++++ modules/installation-creating-azure-dns.adoc | 65 ++++ modules/installation-creating-azure-vnet.adoc | 45 +++ .../installation-creating-azure-worker.adoc | 58 ++++ modules/installation-extracting-infraid.adoc | 32 ++ modules/installation-initializing.adoc | 7 + modules/installation-obtaining-installer.adoc | 1 + ...orting-common-variables-arm-templates.adoc | 55 ++++ ...-infra-generate-k8s-manifest-ignition.adoc | 48 ++- modules/installation-user-infra-generate.adoc | 28 ++ modules/ssh-agent-using.adoc | 7 + 39 files changed, 2177 insertions(+), 20 deletions(-) create mode 100644 installing/installing_azure/installing-azure-user-infra.adoc create mode 100644 modules/installation-arm-bootstrap.adoc create mode 100644 modules/installation-arm-control-plane.adoc create mode 100644 modules/installation-arm-dns.adoc create mode 100644 modules/installation-arm-image-storage.adoc create mode 100644 modules/installation-arm-vnet.adoc create mode 100644 modules/installation-arm-worker.adoc create mode 100644 modules/installation-azure-create-dns-zones.adoc create mode 100644 modules/installation-azure-create-ingress-dns-records.adoc create mode 100644 modules/installation-azure-create-resource-group-and-identity.adoc create mode 100644 modules/installation-azure-user-infra-completing.adoc create mode 100644 modules/installation-azure-user-infra-deploying-rhcos.adoc create mode 100644 modules/installation-azure-user-infra-uploading-rhcos.adoc create mode 100644 modules/installation-azure-user-infra-wait-for-bootstrap.adoc create mode 100644 modules/installation-creating-azure-bootstrap.adoc create mode 100644 modules/installation-creating-azure-control-plane.adoc create mode 100644 modules/installation-creating-azure-dns.adoc create mode 100644 modules/installation-creating-azure-vnet.adoc create mode 100644 modules/installation-creating-azure-worker.adoc create mode 
100644 modules/installation-user-infra-exporting-common-variables-arm-templates.adoc diff --git a/_topic_map.yml b/_topic_map.yml index 7dc26a791f..0c30399a7c 100644 --- a/_topic_map.yml +++ b/_topic_map.yml @@ -126,6 +126,8 @@ Topics: File: installing-azure-vnet - Name: Installing a private cluster on Azure File: installing-azure-private + - Name: Installing a cluster on Azure using ARM templates + File: installing-azure-user-infra - Name: Uninstalling a cluster on Azure File: uninstalling-cluster-azure - Name: Installing on GCP diff --git a/installing/install_config/installation-types.adoc b/installing/install_config/installation-types.adoc index 1725fe75eb..bd692158da 100644 --- a/installing/install_config/installation-types.adoc +++ b/installing/install_config/installation-types.adoc @@ -68,7 +68,7 @@ Not all installation options are currently available for all platforms, as shown |Custom |xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[X] -| +|xref:../../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[X] |xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[X] | |xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[X] diff --git a/installing/installing_azure/installing-azure-user-infra.adoc b/installing/installing_azure/installing-azure-user-infra.adoc new file mode 100644 index 0000000000..727e7915b0 --- /dev/null +++ b/installing/installing_azure/installing-azure-user-infra.adoc @@ -0,0 +1,126 @@ +[id="installing-azure-user-infra"] += Installing a cluster on Azure using ARM templates +include::modules/common-attributes.adoc[] +:context: installing-azure-user-infra + +toc::[] + +In {product-title} version {product-version}, you can install a cluster on +Microsoft Azure by using infrastructure that you provide. + +Several +link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/overview[Azure Resource Manager] +(ARM) templates are provided to assist in completing these steps or to help +model your own. You can also create the required resources through other +methods; the templates are just an example. + +.Prerequisites + +* Review details about the +xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] +processes. +* xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[Configure an Azure account] +to host the cluster. +* Download the Azure CLI and install it on your computer. See +link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest[Install the Azure CLI] +in the Azure documentation. The documentation below was last tested using +version `2.2.0` of the Azure CLI. Azure CLI commands might perform differently +based on the version you use. +* If you use a firewall and plan to use telemetry, you must +xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configure the firewall to allow the sites] that your cluster requires access to. ++ +[NOTE] +==== +Be sure to also review this site list if you are configuring a proxy. +==== + +include::modules/cluster-entitlements.adoc[leveloffset=+1] + +[id="installation-azure-user-infra-config-project"] +== Configuring your Azure project + +Before you can install {product-title}, you must configure an Azure project to +host it. 
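The prerequisites above note that Azure CLI behavior can differ between versions and that these steps were last tested with version `2.2.0`. If you want to confirm which Azure CLI version is installed on your workstation before you continue, one quick check, shown here only as a convenience, is:

----
$ az --version
----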
+ +[IMPORTANT] +==== +All Azure resources that are available through public endpoints are subject to +resource name restrictions, and you cannot create resources that use certain +terms. For a list of terms that Azure restricts, see +link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] +in the Azure documentation. +==== + +include::modules/installation-azure-limits.adoc[leveloffset=+2] +include::modules/installation-azure-network-config.adoc[leveloffset=+2] + +You can view Azure's DNS solution by visiting this +xref:installation-azure-create-dns-zones_{context}[example for creating DNS zones]. + +include::modules/installation-azure-increasing-limits.adoc[leveloffset=+2] + +[id="csr-management-azure_{context}"] +=== Certificate signing requests management + +Because your cluster has limited access to automatic machine management when you +use infrastructure that you provision, you must provide a mechanism for approving +cluster certificate signing requests (CSRs) after installation. The +`kube-controller-manager` only approves the kubelet client CSRs. The +`machine-approver` cannot guarantee the validity of a serving certificate +that is requested by using kubelet credentials because it cannot confirm that +the correct machine issued the request. You must determine and implement a +method of verifying the validity of the kubelet serving certificate requests +and approving them. + +include::modules/installation-azure-permissions.adoc[leveloffset=+2] +include::modules/installation-azure-service-principal.adoc[leveloffset=+2] +include::modules/installation-azure-regions.adoc[leveloffset=+2] + +include::modules/installation-obtaining-installer.adoc[leveloffset=+1] + +include::modules/ssh-agent-using.adoc[leveloffset=+1] + +include::modules/installation-user-infra-generate.adoc[leveloffset=+1] +include::modules/installation-initializing.adoc[leveloffset=+2] +include::modules/installation-configure-proxy.adoc[leveloffset=+2] +include::modules/installation-user-infra-exporting-common-variables-arm-templates.adoc[leveloffset=+2] +include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] + +include::modules/installation-azure-create-resource-group-and-identity.adoc[leveloffset=+1] + +include::modules/installation-azure-user-infra-uploading-rhcos.adoc[leveloffset=+1] + +include::modules/installation-azure-create-dns-zones.adoc[leveloffset=+1] + +You can learn more about xref:installation-azure-network-config_{context}[configuring a public DNS zone in Azure] +by visiting that section. 
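If you delegate your base domain to the public DNS zone that you create for the cluster, you might also want to confirm the name servers that Azure assigns to that zone. Assuming the same `BASE_DOMAIN_RESOURCE_GROUP`, `CLUSTER_NAME`, and `BASE_DOMAIN` variables that the DNS zone example uses, one way to list them is:

----
$ az network dns zone show -g ${BASE_DOMAIN_RESOURCE_GROUP} -n ${CLUSTER_NAME}.${BASE_DOMAIN} --query nameServers -o tsv
----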
+ +include::modules/installation-creating-azure-vnet.adoc[leveloffset=+1] +include::modules/installation-arm-vnet.adoc[leveloffset=+2] + +include::modules/installation-azure-user-infra-deploying-rhcos.adoc[leveloffset=+1] +include::modules/installation-arm-image-storage.adoc[leveloffset=+2] + +include::modules/installation-creating-azure-dns.adoc[leveloffset=+1] +include::modules/installation-arm-dns.adoc[leveloffset=+2] + +include::modules/installation-creating-azure-bootstrap.adoc[leveloffset=+1] +include::modules/installation-arm-bootstrap.adoc[leveloffset=+2] + +include::modules/installation-creating-azure-control-plane.adoc[leveloffset=+1] +include::modules/installation-arm-control-plane.adoc[leveloffset=+2] + +include::modules/installation-azure-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] + +include::modules/installation-creating-azure-worker.adoc[leveloffset=+1] +include::modules/installation-arm-worker.adoc[leveloffset=+2] + +include::modules/cli-installing-cli.adoc[leveloffset=+1] + +include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] + +include::modules/installation-approve-csrs.adoc[leveloffset=+1] + +include::modules/installation-azure-create-ingress-dns-records.adoc[leveloffset=+1] + +include::modules/installation-azure-user-infra-completing.adoc[leveloffset=+1] diff --git a/modules/cli-installing-cli.adoc b/modules/cli-installing-cli.adoc index 132cbbff78..be478f7b3b 100644 --- a/modules/cli-installing-cli.adoc +++ b/modules/cli-installing-cli.adoc @@ -11,6 +11,7 @@ // * installing/installing_azure/installing-azure-default.adoc // * installing/installing_azure/installing-azure-private.adoc // * installing/installing_azure/installing-azure-vnet.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_bare_metal/installing-bare-metal.adoc // * installing/installing_gcp/installing-gcp-customizations.adoc // * installing/installing_gcp/installing-gcp-private.adoc diff --git a/modules/cli-logging-in-kubeadmin.adoc b/modules/cli-logging-in-kubeadmin.adoc index ae352f2d98..b005a6cde1 100644 --- a/modules/cli-logging-in-kubeadmin.adoc +++ b/modules/cli-logging-in-kubeadmin.adoc @@ -10,6 +10,7 @@ // * installing/installing_azure/installing-azure-default.adoc // * installing/installing_azure/installing-azure-private.adoc // * installing/installing_azure/installing-azure-vnet.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_bare_metal/installing-bare-metal.adoc // * installing/installing_gcp/installing-gcp-customizations.adoc // * installing/installing_gcp/installing-gcp-private.adoc diff --git a/modules/cluster-entitlements.adoc b/modules/cluster-entitlements.adoc index 112d1d2223..602c57d03d 100644 --- a/modules/cluster-entitlements.adoc +++ b/modules/cluster-entitlements.adoc @@ -11,6 +11,7 @@ // * installing/installing_azure/installing-azure-default.adoc // * installing/installing_azure/installing-azure-private.adoc // * installing/installing_azure/installing-azure-vnet.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_bare_metal/installing-bare-metal.adoc // * installing/installing_gcp/installing-gcp-customizations.adoc // * installing/installing_gcp/installing-gcp-private.adoc diff --git a/modules/installation-approve-csrs.adoc b/modules/installation-approve-csrs.adoc index ce86dbc081..77a34e7673 100644 --- a/modules/installation-approve-csrs.adoc +++ b/modules/installation-approve-csrs.adoc @@ -1,6 +1,7 @@ // Module 
included in the following assemblies: // // * installing/installing_aws/installing-aws-user-infra.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_gcp/installing-gcp-user-infra.adoc // * installing/installing_gcp/installing-gcp-restricted-networks.adoc // * installing/installing_bare_metal/installing-bare-metal.adoc diff --git a/modules/installation-arm-bootstrap.adoc b/modules/installation-arm-bootstrap.adoc new file mode 100644 index 0000000000..80c35a742d --- /dev/null +++ b/modules/installation-arm-bootstrap.adoc @@ -0,0 +1,257 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-arm-bootstrap_{context}"] += ARM template for the bootstrap machine + +You can use the following Azure Resource Manager (ARM) template to deploy the +bootstrap machine that you need for your {product-title} cluster: + +.`04_bootstrap.json` ARM template +[source,json] +---- +{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "baseName" : { + "type" : "string", + "minLength" : 1, + "metadata" : { + "description" : "Base name to be used in resource names (usually the cluster's Infra ID)" + } + }, + "bootstrapIgnition" : { + "type" : "string", + "minLength" : 1, + "metadata" : { + "description" : "Bootstrap ignition content for the bootstrap cluster" + } + }, + "sshKeyData" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH RSA public key file as a string." + } + }, + "bootstrapVMSize" : { + "type" : "string", + "defaultValue" : "Standard_D4s_v3", + "allowedValues" : [ + "Standard_A2", + "Standard_A3", + "Standard_A4", + "Standard_A5", + "Standard_A6", + "Standard_A7", + "Standard_A8", + "Standard_A9", + "Standard_A10", + "Standard_A11", + "Standard_D2", + "Standard_D3", + "Standard_D4", + "Standard_D11", + "Standard_D12", + "Standard_D13", + "Standard_D14", + "Standard_D2_v2", + "Standard_D3_v2", + "Standard_D4_v2", + "Standard_D5_v2", + "Standard_D8_v3", + "Standard_D11_v2", + "Standard_D12_v2", + "Standard_D13_v2", + "Standard_D14_v2", + "Standard_E2_v3", + "Standard_E4_v3", + "Standard_E8_v3", + "Standard_E16_v3", + "Standard_E32_v3", + "Standard_E64_v3", + "Standard_E2s_v3", + "Standard_E4s_v3", + "Standard_E8s_v3", + "Standard_E16s_v3", + "Standard_E32s_v3", + "Standard_E64s_v3", + "Standard_G1", + "Standard_G2", + "Standard_G3", + "Standard_G4", + "Standard_G5", + "Standard_DS2", + "Standard_DS3", + "Standard_DS4", + "Standard_DS11", + "Standard_DS12", + "Standard_DS13", + "Standard_DS14", + "Standard_DS2_v2", + "Standard_DS3_v2", + "Standard_DS4_v2", + "Standard_DS5_v2", + "Standard_DS11_v2", + "Standard_DS12_v2", + "Standard_DS13_v2", + "Standard_DS14_v2", + "Standard_GS1", + "Standard_GS2", + "Standard_GS3", + "Standard_GS4", + "Standard_GS5", + "Standard_D2s_v3", + "Standard_D4s_v3", + "Standard_D8s_v3" + ], + "metadata" : { + "description" : "The size of the Bootstrap Virtual Machine" + } + } + }, + "variables" : { + "location" : "[resourceGroup().location]", + "virtualNetworkName" : "[concat(parameters('baseName'), '-vnet')]", + "virtualNetworkID" : "[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]", + "masterSubnetName" : "[concat(parameters('baseName'), '-master-subnet')]", + "masterSubnetRef" : "[concat(variables('virtualNetworkID'), '/subnets/', variables('masterSubnetName'))]", + "masterLoadBalancerName" : 
"[concat(parameters('baseName'), '-public-lb')]", + "internalLoadBalancerName" : "[concat(parameters('baseName'), '-internal-lb')]", + "sshKeyPath" : "/home/core/.ssh/authorized_keys", + "identityName" : "[concat(parameters('baseName'), '-identity')]", + "vmName" : "[concat(parameters('baseName'), '-bootstrap')]", + "nicName" : "[concat(variables('vmName'), '-nic')]", + "imageName" : "[concat(parameters('baseName'), '-image')]", + "controlPlaneNsgName" : "[concat(parameters('baseName'), '-controlplane-nsg')]", + "sshPublicIpAddressName" : "[concat(variables('vmName'), '-ssh-pip')]" + }, + "resources" : [ + { + "apiVersion" : "2018-12-01", + "type" : "Microsoft.Network/publicIPAddresses", + "name" : "[variables('sshPublicIpAddressName')]", + "location" : "[variables('location')]", + "sku": { + "name": "Standard" + }, + "properties" : { + "publicIPAllocationMethod" : "Static", + "dnsSettings" : { + "domainNameLabel" : "[variables('sshPublicIpAddressName')]" + } + } + }, + { + "apiVersion" : "2018-06-01", + "type" : "Microsoft.Network/networkInterfaces", + "name" : "[variables('nicName')]", + "location" : "[variables('location')]", + "dependsOn" : [ + "[resourceId('Microsoft.Network/publicIPAddresses', variables('sshPublicIpAddressName'))]" + ], + "properties" : { + "ipConfigurations" : [ + { + "name" : "pipConfig", + "properties" : { + "privateIPAllocationMethod" : "Dynamic", + "publicIPAddress": { + "id": "[resourceId('Microsoft.Network/publicIPAddresses', variables('sshPublicIpAddressName'))]" + }, + "subnet" : { + "id" : "[variables('masterSubnetRef')]" + }, + "loadBalancerBackendAddressPools" : [ + { + "id" : "[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('masterLoadBalancerName'), '/backendAddressPools/public-lb-backend')]" + }, + { + "id" : "[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'), '/backendAddressPools/internal-lb-backend')]" + } + ] + } + } + ] + } + }, + { + "apiVersion" : "2018-06-01", + "type" : "Microsoft.Compute/virtualMachines", + "name" : "[variables('vmName')]", + "location" : "[variables('location')]", + "identity" : { + "type" : "userAssigned", + "userAssignedIdentities" : { + "[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/', variables('identityName'))]" : {} + } + }, + "dependsOn" : [ + "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]" + ], + "properties" : { + "hardwareProfile" : { + "vmSize" : "[parameters('bootstrapVMSize')]" + }, + "osProfile" : { + "computerName" : "[variables('vmName')]", + "adminUsername" : "core", + "customData" : "[parameters('bootstrapIgnition')]", + "linuxConfiguration" : { + "disablePasswordAuthentication" : true, + "ssh" : { + "publicKeys" : [ + { + "path" : "[variables('sshKeyPath')]", + "keyData" : "[parameters('sshKeyData')]" + } + ] + } + } + }, + "storageProfile" : { + "imageReference": { + "id": "[resourceId('Microsoft.Compute/images', variables('imageName'))]" + }, + "osDisk" : { + "name": "[concat(variables('vmName'),'_OSDisk')]", + "osType" : "Linux", + "createOption" : "FromImage", + "managedDisk": { + "storageAccountType": "Premium_LRS" + }, + "diskSizeGB" : 100 + } + }, + "networkProfile" : { + "networkInterfaces" : [ + { + "id" : "[resourceId('Microsoft.Network/networkInterfaces', variables('nicName'))]" + } + ] + } + } + }, + { + 
"apiVersion" : "2018-06-01", + "type": "Microsoft.Network/networkSecurityGroups/securityRules", + "name" : "[concat(variables('controlPlaneNsgName'), '/bootstrap_ssh_in')]", + "location" : "[variables('location')]", + "dependsOn" : [ + "[resourceId('Microsoft.Compute/virtualMachines', variables('vmName'))]" + ], + "properties": { + "protocol" : "Tcp", + "sourcePortRange" : "*", + "destinationPortRange" : "22", + "sourceAddressPrefix" : "*", + "destinationAddressPrefix" : "*", + "access" : "Allow", + "priority" : 100, + "direction" : "Inbound" + } + } + ] +} +---- diff --git a/modules/installation-arm-control-plane.adoc b/modules/installation-arm-control-plane.adoc new file mode 100644 index 0000000000..63aad3417a --- /dev/null +++ b/modules/installation-arm-control-plane.adoc @@ -0,0 +1,289 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-arm-control-plane_{context}"] += ARM template for control plane machines + +You can use the following Azure Resource Manager (ARM) template to deploy the +control plane machines that you need for your {product-title} cluster: + +.`05_masters.json` ARM template +[source,json] +---- +{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "baseName" : { + "type" : "string", + "minLength" : 1, + "metadata" : { + "description" : "Base name to be used in resource names (usually the cluster's Infra ID)" + } + }, + "masterIgnition" : { + "type" : "string", + "metadata" : { + "description" : "Ignition content for the master nodes" + } + }, + "numberOfMasters" : { + "type" : "int", + "defaultValue" : 3, + "minValue" : 2, + "maxValue" : 30, + "metadata" : { + "description" : "Number of OpenShift masters to deploy" + } + }, + "sshKeyData" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH RSA public key file as a string" + } + }, + "privateDNSZoneName" : { + "type" : "string", + "metadata" : { + "description" : "Name of the private DNS zone the master nodes are going to be attached to" + } + }, + "masterVMSize" : { + "type" : "string", + "defaultValue" : "Standard_D8s_v3", + "allowedValues" : [ + "Standard_A2", + "Standard_A3", + "Standard_A4", + "Standard_A5", + "Standard_A6", + "Standard_A7", + "Standard_A8", + "Standard_A9", + "Standard_A10", + "Standard_A11", + "Standard_D2", + "Standard_D3", + "Standard_D4", + "Standard_D11", + "Standard_D12", + "Standard_D13", + "Standard_D14", + "Standard_D2_v2", + "Standard_D3_v2", + "Standard_D4_v2", + "Standard_D5_v2", + "Standard_D8_v3", + "Standard_D11_v2", + "Standard_D12_v2", + "Standard_D13_v2", + "Standard_D14_v2", + "Standard_E2_v3", + "Standard_E4_v3", + "Standard_E8_v3", + "Standard_E16_v3", + "Standard_E32_v3", + "Standard_E64_v3", + "Standard_E2s_v3", + "Standard_E4s_v3", + "Standard_E8s_v3", + "Standard_E16s_v3", + "Standard_E32s_v3", + "Standard_E64s_v3", + "Standard_G1", + "Standard_G2", + "Standard_G3", + "Standard_G4", + "Standard_G5", + "Standard_DS2", + "Standard_DS3", + "Standard_DS4", + "Standard_DS11", + "Standard_DS12", + "Standard_DS13", + "Standard_DS14", + "Standard_DS2_v2", + "Standard_DS3_v2", + "Standard_DS4_v2", + "Standard_DS5_v2", + "Standard_DS11_v2", + "Standard_DS12_v2", + "Standard_DS13_v2", + "Standard_DS14_v2", + "Standard_GS1", + "Standard_GS2", + "Standard_GS3", + "Standard_GS4", + "Standard_GS5", + "Standard_D2s_v3", + "Standard_D4s_v3", + "Standard_D8s_v3" + ], + "metadata" : 
{ + "description" : "The size of the Master Virtual Machines" + } + } + }, + "variables" : { + "location" : "[resourceGroup().location]", + "virtualNetworkName" : "[concat(parameters('baseName'), '-vnet')]", + "virtualNetworkID" : "[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]", + "masterSubnetName" : "[concat(parameters('baseName'), '-master-subnet')]", + "masterSubnetRef" : "[concat(variables('virtualNetworkID'), '/subnets/', variables('masterSubnetName'))]", + "masterLoadBalancerName" : "[concat(parameters('baseName'), '-public-lb')]", + "internalLoadBalancerName" : "[concat(parameters('baseName'), '-internal-lb')]", + "sshKeyPath" : "/home/core/.ssh/authorized_keys", + "identityName" : "[concat(parameters('baseName'), '-identity')]", + "imageName" : "[concat(parameters('baseName'), '-image')]", + "copy" : [ + { + "name" : "vmNames", + "count" : "[parameters('numberOfMasters')]", + "input" : "[concat(parameters('baseName'), '-master-', copyIndex('vmNames'))]" + } + ] + }, + "resources" : [ + { + "apiVersion" : "2018-06-01", + "type" : "Microsoft.Network/networkInterfaces", + "copy" : { + "name" : "nicCopy", + "count" : "[length(variables('vmNames'))]" + }, + "name" : "[concat(variables('vmNames')[copyIndex()], '-nic')]", + "location" : "[variables('location')]", + "properties" : { + "ipConfigurations" : [ + { + "name" : "pipConfig", + "properties" : { + "privateIPAllocationMethod" : "Dynamic", + "subnet" : { + "id" : "[variables('masterSubnetRef')]" + }, + "loadBalancerBackendAddressPools" : [ + { + "id" : "[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('masterLoadBalancerName'), '/backendAddressPools/public-lb-backend')]" + }, + { + "id" : "[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'), '/backendAddressPools/internal-lb-backend')]" + } + ] + } + } + ] + } + }, + { + "apiVersion": "2018-09-01", + "type": "Microsoft.Network/privateDnsZones/SRV", + "name": "[concat(parameters('privateDNSZoneName'), '/_etcd-server-ssl._tcp')]", + "location" : "[variables('location')]", + "properties": { + "ttl": 60, + "copy": [{ + "name": "srvRecords", + "count": "[length(variables('vmNames'))]", + "input": { + "priority": 0, + "weight" : 10, + "port" : 2380, + "target" : "[concat('etcd-', copyIndex('srvRecords'), '.', parameters('privateDNSZoneName'))]" + } + }] + } + }, + { + "apiVersion": "2018-09-01", + "type": "Microsoft.Network/privateDnsZones/A", + "copy" : { + "name" : "dnsCopy", + "count" : "[length(variables('vmNames'))]" + }, + "name": "[concat(parameters('privateDNSZoneName'), '/etcd-', copyIndex())]", + "location" : "[variables('location')]", + "dependsOn" : [ + "[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]" + ], + "properties": { + "ttl": 60, + "aRecords": [ + { + "ipv4Address": "[reference(concat(variables('vmNames')[copyIndex()], '-nic')).ipConfigurations[0].properties.privateIPAddress]" + } + ] + } + }, + { + "apiVersion" : "2018-06-01", + "type" : "Microsoft.Compute/virtualMachines", + "copy" : { + "name" : "vmCopy", + "count" : "[length(variables('vmNames'))]" + }, + "name" : "[variables('vmNames')[copyIndex()]]", + "location" : "[variables('location')]", + "identity" : { + "type" : "userAssigned", + "userAssignedIdentities" : { + 
"[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/', variables('identityName'))]" : {} + } + }, + "dependsOn" : [ + "[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]", + "[concat('Microsoft.Network/privateDnsZones/', parameters('privateDNSZoneName'), '/A/etcd-', copyIndex())]", + "[concat('Microsoft.Network/privateDnsZones/', parameters('privateDNSZoneName'), '/SRV/_etcd-server-ssl._tcp')]" + ], + "properties" : { + "hardwareProfile" : { + "vmSize" : "[parameters('masterVMSize')]" + }, + "osProfile" : { + "computerName" : "[variables('vmNames')[copyIndex()]]", + "adminUsername" : "core", + "customData" : "[parameters('masterIgnition')]", + "linuxConfiguration" : { + "disablePasswordAuthentication" : true, + "ssh" : { + "publicKeys" : [ + { + "path" : "[variables('sshKeyPath')]", + "keyData" : "[parameters('sshKeyData')]" + } + ] + } + } + }, + "storageProfile" : { + "imageReference": { + "id": "[resourceId('Microsoft.Compute/images', variables('imageName'))]" + }, + "osDisk" : { + "name": "[concat(variables('vmNames')[copyIndex()], '_OSDisk')]", + "osType" : "Linux", + "createOption" : "FromImage", + "caching": "ReadOnly", + "writeAcceleratorEnabled": false, + "managedDisk": { + "storageAccountType": "Premium_LRS" + }, + "diskSizeGB" : 128 + } + }, + "networkProfile" : { + "networkInterfaces" : [ + { + "id" : "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmNames')[copyIndex()], '-nic'))]", + "properties": { + "primary": false + } + } + ] + } + } + } + ] +} +---- diff --git a/modules/installation-arm-dns.adoc b/modules/installation-arm-dns.adoc new file mode 100644 index 0000000000..f26a582551 --- /dev/null +++ b/modules/installation-arm-dns.adoc @@ -0,0 +1,252 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-arm-dns_{context}"] += ARM template for the network and load balancers + +You can use the following Azure Resource Manager (ARM) template to deploy the +networking objects and load balancers that you need for your {product-title} +cluster: + +.`03_infra.json` ARM template +[source,json] +---- +{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "baseName" : { + "type" : "string", + "minLength" : 1, + "metadata" : { + "description" : "Base name to be used in resource names (usually the cluster's Infra ID)" + } + }, + "privateDNSZoneName" : { + "type" : "string", + "metadata" : { + "description" : "Name of the private DNS zone" + } + } + }, + "variables" : { + "location" : "[resourceGroup().location]", + "virtualNetworkName" : "[concat(parameters('baseName'), '-vnet')]", + "virtualNetworkID" : "[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]", + "masterSubnetName" : "[concat(parameters('baseName'), '-master-subnet')]", + "masterSubnetRef" : "[concat(variables('virtualNetworkID'), '/subnets/', variables('masterSubnetName'))]", + "masterPublicIpAddressName" : "[concat(parameters('baseName'), '-master-pip')]", + "masterPublicIpAddressID" : "[resourceId('Microsoft.Network/publicIPAddresses', variables('masterPublicIpAddressName'))]", + "masterLoadBalancerName" : "[concat(parameters('baseName'), '-public-lb')]", + "masterLoadBalancerID" : "[resourceId('Microsoft.Network/loadBalancers', variables('masterLoadBalancerName'))]", + "internalLoadBalancerName" : 
"[concat(parameters('baseName'), '-internal-lb')]", + "internalLoadBalancerID" : "[resourceId('Microsoft.Network/loadBalancers', variables('internalLoadBalancerName'))]", + "skuName": "Standard" + }, + "resources" : [ + { + "apiVersion" : "2018-12-01", + "type" : "Microsoft.Network/publicIPAddresses", + "name" : "[variables('masterPublicIpAddressName')]", + "location" : "[variables('location')]", + "sku": { + "name": "[variables('skuName')]" + }, + "properties" : { + "publicIPAllocationMethod" : "Static", + "dnsSettings" : { + "domainNameLabel" : "[variables('masterPublicIpAddressName')]" + } + } + }, + { + "apiVersion" : "2018-12-01", + "type" : "Microsoft.Network/loadBalancers", + "name" : "[variables('masterLoadBalancerName')]", + "location" : "[variables('location')]", + "sku": { + "name": "[variables('skuName')]" + }, + "dependsOn" : [ + "[concat('Microsoft.Network/publicIPAddresses/', variables('masterPublicIpAddressName'))]" + ], + "properties" : { + "frontendIPConfigurations" : [ + { + "name" : "public-lb-ip", + "properties" : { + "publicIPAddress" : { + "id" : "[variables('masterPublicIpAddressID')]" + } + } + } + ], + "backendAddressPools" : [ + { + "name" : "public-lb-backend" + } + ], + "loadBalancingRules" : [ + { + "name" : "api-internal", + "properties" : { + "frontendIPConfiguration" : { + "id" :"[concat(variables('masterLoadBalancerID'), '/frontendIPConfigurations/public-lb-ip')]" + }, + "backendAddressPool" : { + "id" : "[concat(variables('masterLoadBalancerID'), '/backendAddressPools/public-lb-backend')]" + }, + "protocol" : "Tcp", + "loadDistribution" : "Default", + "idleTimeoutInMinutes" : 30, + "frontendPort" : 6443, + "backendPort" : 6443, + "probe" : { + "id" : "[concat(variables('masterLoadBalancerID'), '/probes/api-internal-probe')]" + } + } + } + ], + "probes" : [ + { + "name" : "api-internal-probe", + "properties" : { + "protocol" : "Tcp", + "port" : 6443, + "intervalInSeconds" : 10, + "numberOfProbes" : 3 + } + } + ] + } + }, + { + "apiVersion" : "2018-12-01", + "type" : "Microsoft.Network/loadBalancers", + "name" : "[variables('internalLoadBalancerName')]", + "location" : "[variables('location')]", + "sku": { + "name": "[variables('skuName')]" + }, + "properties" : { + "frontendIPConfigurations" : [ + { + "name" : "internal-lb-ip", + "properties" : { + "privateIPAllocationMethod" : "Dynamic", + "subnet" : { + "id" : "[variables('masterSubnetRef')]" + }, + "privateIPAddressVersion" : "IPv4" + } + } + ], + "backendAddressPools" : [ + { + "name" : "internal-lb-backend" + } + ], + "loadBalancingRules" : [ + { + "name" : "api-internal", + "properties" : { + "frontendIPConfiguration" : { + "id" : "[concat(variables('internalLoadBalancerID'), '/frontendIPConfigurations/internal-lb-ip')]" + }, + "frontendPort" : 6443, + "backendPort" : 6443, + "enableFloatingIP" : false, + "idleTimeoutInMinutes" : 30, + "protocol" : "Tcp", + "enableTcpReset" : false, + "loadDistribution" : "Default", + "backendAddressPool" : { + "id" : "[concat(variables('internalLoadBalancerID'), '/backendAddressPools/internal-lb-backend')]" + }, + "probe" : { + "id" : "[concat(variables('internalLoadBalancerID'), '/probes/api-internal-probe')]" + } + } + }, + { + "name" : "sint", + "properties" : { + "frontendIPConfiguration" : { + "id" : "[concat(variables('internalLoadBalancerID'), '/frontendIPConfigurations/internal-lb-ip')]" + }, + "frontendPort" : 22623, + "backendPort" : 22623, + "enableFloatingIP" : false, + "idleTimeoutInMinutes" : 30, + "protocol" : "Tcp", + "enableTcpReset" : false, + 
"loadDistribution" : "Default", + "backendAddressPool" : { + "id" : "[concat(variables('internalLoadBalancerID'), '/backendAddressPools/internal-lb-backend')]" + }, + "probe" : { + "id" : "[concat(variables('internalLoadBalancerID'), '/probes/sint-probe')]" + } + } + } + ], + "probes" : [ + { + "name" : "api-internal-probe", + "properties" : { + "protocol" : "Tcp", + "port" : 6443, + "intervalInSeconds" : 10, + "numberOfProbes" : 3 + } + }, + { + "name" : "sint-probe", + "properties" : { + "protocol" : "Tcp", + "port" : 22623, + "intervalInSeconds" : 10, + "numberOfProbes" : 3 + } + } + ] + } + }, + { + "apiVersion": "2018-09-01", + "type": "Microsoft.Network/privateDnsZones/A", + "name": "[concat(parameters('privateDNSZoneName'), '/api')]", + "location" : "[variables('location')]", + "dependsOn" : [ + "[concat('Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'))]" + ], + "properties": { + "ttl": 60, + "aRecords": [ + { + "ipv4Address": "[reference(variables('internalLoadBalancerName')).frontendIPConfigurations[0].properties.privateIPAddress]" + } + ] + } + }, + { + "apiVersion": "2018-09-01", + "type": "Microsoft.Network/privateDnsZones/A", + "name": "[concat(parameters('privateDNSZoneName'), '/api-int')]", + "location" : "[variables('location')]", + "dependsOn" : [ + "[concat('Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'))]" + ], + "properties": { + "ttl": 60, + "aRecords": [ + { + "ipv4Address": "[reference(variables('internalLoadBalancerName')).frontendIPConfigurations[0].properties.privateIPAddress]" + } + ] + } + } + ] +} +---- diff --git a/modules/installation-arm-image-storage.adoc b/modules/installation-arm-image-storage.adoc new file mode 100644 index 0000000000..aa9b467059 --- /dev/null +++ b/modules/installation-arm-image-storage.adoc @@ -0,0 +1,55 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-arm-image-storage_{context}"] += ARM template for image storage + +You can use the following Azure Resource Manager (ARM) template to deploy the +stored {op-system-first} image that you need for your {product-title} cluster: + +.`02_storage.json` ARM template +[source,json] +---- +{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "baseName" : { + "type" : "string", + "minLength" : 1, + "metadata" : { + "description" : "Base name to be used in resource names (usually the cluster's Infra ID)" + } + }, + "vhdBlobURL" : { + "type" : "string", + "metadata" : { + "description" : "URL pointing to the blob where the VHD to be used to create master and worker machines is located" + } + } + }, + "variables" : { + "location" : "[resourceGroup().location]", + "imageName" : "[concat(parameters('baseName'), '-image')]" + }, + "resources" : [ + { + "apiVersion" : "2018-06-01", + "type": "Microsoft.Compute/images", + "name": "[variables('imageName')]", + "location" : "[variables('location')]", + "properties": { + "storageProfile": { + "osDisk": { + "osType": "Linux", + "osState": "Generalized", + "blobUri": "[parameters('vhdBlobURL')]", + "storageAccountType": "Standard_LRS" + } + } + } + } + ] +} +---- diff --git a/modules/installation-arm-vnet.adoc b/modules/installation-arm-vnet.adoc new file mode 100644 index 0000000000..c86653627c --- /dev/null +++ b/modules/installation-arm-vnet.adoc @@ -0,0 +1,125 @@ +// Module included in the following assemblies: +// 
+// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-arm-vnet_{context}"] += ARM template for the VNet + +You can use the following Azure Resource Manager (ARM) template to deploy the +VNet that you need for your {product-title} cluster: + +.`01_vnet.json` ARM template +[source,json] +---- +{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "baseName" : { + "type" : "string", + "minLength" : 1, + "metadata" : { + "description" : "Base name to be used in resource names (usually the cluster's Infra ID)" + } + } + }, + "variables" : { + "location" : "[resourceGroup().location]", + "virtualNetworkName" : "[concat(parameters('baseName'), '-vnet')]", + "addressPrefix" : "10.0.0.0/16", + "masterSubnetName" : "[concat(parameters('baseName'), '-master-subnet')]", + "masterSubnetPrefix" : "10.0.0.0/24", + "nodeSubnetName" : "[concat(parameters('baseName'), '-worker-subnet')]", + "nodeSubnetPrefix" : "10.0.1.0/24", + "controlPlaneNsgName" : "[concat(parameters('baseName'), '-controlplane-nsg')]", + "nodeNsgName" : "[concat(parameters('baseName'), '-node-nsg')]" + }, + "resources" : [ + { + "apiVersion" : "2018-12-01", + "type" : "Microsoft.Network/virtualNetworks", + "name" : "[variables('virtualNetworkName')]", + "location" : "[variables('location')]", + "dependsOn" : [ + "[concat('Microsoft.Network/networkSecurityGroups/', variables('controlPlaneNsgName'))]", + "[concat('Microsoft.Network/networkSecurityGroups/', variables('nodeNsgName'))]" + ], + "properties" : { + "addressSpace" : { + "addressPrefixes" : [ + "[variables('addressPrefix')]" + ] + }, + "subnets" : [ + { + "name" : "[variables('masterSubnetName')]", + "properties" : { + "addressPrefix" : "[variables('masterSubnetPrefix')]", + "serviceEndpoints": [], + "networkSecurityGroup" : { + "id" : "[resourceId('Microsoft.Network/networkSecurityGroups', variables('controlPlaneNsgName'))]" + } + } + }, + { + "name" : "[variables('nodeSubnetName')]", + "properties" : { + "addressPrefix" : "[variables('nodeSubnetPrefix')]", + "serviceEndpoints": [], + "networkSecurityGroup" : { + "id" : "[resourceId('Microsoft.Network/networkSecurityGroups', variables('nodeNsgName'))]" + } + } + } + ] + } + }, + { + "type" : "Microsoft.Network/networkSecurityGroups", + "name" : "[variables('controlPlaneNsgName')]", + "apiVersion" : "2018-10-01", + "location" : "[variables('location')]", + "properties" : { + "securityRules" : [ + { + "name" : "apiserver_in", + "properties" : { + "protocol" : "Tcp", + "sourcePortRange" : "*", + "destinationPortRange" : "6443", + "sourceAddressPrefix" : "*", + "destinationAddressPrefix" : "*", + "access" : "Allow", + "priority" : 101, + "direction" : "Inbound" + } + } + ] + } + }, + { + "type" : "Microsoft.Network/networkSecurityGroups", + "name" : "[variables('nodeNsgName')]", + "apiVersion" : "2018-10-01", + "location" : "[variables('location')]", + "properties" : { + "securityRules" : [ + { + "name" : "apiserver_in", + "properties" : { + "protocol" : "Tcp", + "sourcePortRange" : "*", + "destinationPortRange" : "6443", + "sourceAddressPrefix" : "*", + "destinationAddressPrefix" : "*", + "access" : "Allow", + "priority" : 101, + "direction" : "Inbound" + } + } + ] + } + } + ] +} +---- diff --git a/modules/installation-arm-worker.adoc b/modules/installation-arm-worker.adoc new file mode 100644 index 0000000000..2de97ef1a0 --- /dev/null +++ b/modules/installation-arm-worker.adoc @@ -0,0 +1,243 @@ +// 
Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-arm-worker_{context}"] += ARM template for worker machines + +You can use the following Azure Resource Manager (ARM) template to deploy the +worker machines that you need for your {product-title} cluster: + +.`06_workers.json` ARM template +[source,json] +---- +{ + "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "parameters" : { + "baseName" : { + "type" : "string", + "minLength" : 1, + "metadata" : { + "description" : "Base name to be used in resource names (usually the cluster's Infra ID)" + } + }, + "workerIgnition" : { + "type" : "string", + "metadata" : { + "description" : "Ignition content for the worker nodes" + } + }, + "numberOfNodes" : { + "type" : "int", + "defaultValue" : 3, + "minValue" : 2, + "maxValue" : 30, + "metadata" : { + "description" : "Number of OpenShift compute nodes to deploy" + } + }, + "sshKeyData" : { + "type" : "securestring", + "metadata" : { + "description" : "SSH RSA public key file as a string" + } + }, + "nodeVMSize" : { + "type" : "string", + "defaultValue" : "Standard_D4s_v3", + "allowedValues" : [ + "Standard_A2", + "Standard_A3", + "Standard_A4", + "Standard_A5", + "Standard_A6", + "Standard_A7", + "Standard_A8", + "Standard_A9", + "Standard_A10", + "Standard_A11", + "Standard_D2", + "Standard_D3", + "Standard_D4", + "Standard_D11", + "Standard_D12", + "Standard_D13", + "Standard_D14", + "Standard_D2_v2", + "Standard_D3_v2", + "Standard_D4_v2", + "Standard_D5_v2", + "Standard_D8_v3", + "Standard_D11_v2", + "Standard_D12_v2", + "Standard_D13_v2", + "Standard_D14_v2", + "Standard_E2_v3", + "Standard_E4_v3", + "Standard_E8_v3", + "Standard_E16_v3", + "Standard_E32_v3", + "Standard_E64_v3", + "Standard_E2s_v3", + "Standard_E4s_v3", + "Standard_E8s_v3", + "Standard_E16s_v3", + "Standard_E32s_v3", + "Standard_E64s_v3", + "Standard_G1", + "Standard_G2", + "Standard_G3", + "Standard_G4", + "Standard_G5", + "Standard_DS2", + "Standard_DS3", + "Standard_DS4", + "Standard_DS11", + "Standard_DS12", + "Standard_DS13", + "Standard_DS14", + "Standard_DS2_v2", + "Standard_DS3_v2", + "Standard_DS4_v2", + "Standard_DS5_v2", + "Standard_DS11_v2", + "Standard_DS12_v2", + "Standard_DS13_v2", + "Standard_DS14_v2", + "Standard_GS1", + "Standard_GS2", + "Standard_GS3", + "Standard_GS4", + "Standard_GS5", + "Standard_D2s_v3", + "Standard_D4s_v3", + "Standard_D8s_v3" + ], + "metadata" : { + "description" : "The size of the each Node Virtual Machine" + } + } + }, + "variables" : { + "location" : "[resourceGroup().location]", + "virtualNetworkName" : "[concat(parameters('baseName'), '-vnet')]", + "virtualNetworkID" : "[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]", + "nodeSubnetName" : "[concat(parameters('baseName'), '-worker-subnet')]", + "nodeSubnetRef" : "[concat(variables('virtualNetworkID'), '/subnets/', variables('nodeSubnetName'))]", + "infraLoadBalancerName" : "[parameters('baseName')]", + "sshKeyPath" : "/home/capi/.ssh/authorized_keys", + "identityName" : "[concat(parameters('baseName'), '-identity')]", + "imageName" : "[concat(parameters('baseName'), '-image')]", + "copy" : [ + { + "name" : "vmNames", + "count" : "[parameters('numberOfNodes')]", + "input" : "[concat(parameters('baseName'), '-worker-', variables('location'), '-', copyIndex('vmNames', 1))]" + } + ] + }, + "resources" : [ + { + "apiVersion" : 
"2019-05-01", + "name" : "[concat('node', copyIndex())]", + "type" : "Microsoft.Resources/deployments", + "copy" : { + "name" : "nodeCopy", + "count" : "[length(variables('vmNames'))]" + }, + "properties" : { + "mode" : "Incremental", + "template" : { + "$schema" : "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion" : "1.0.0.0", + "resources" : [ + { + "apiVersion" : "2018-06-01", + "type" : "Microsoft.Network/networkInterfaces", + "name" : "[concat(variables('vmNames')[copyIndex()], '-nic')]", + "location" : "[variables('location')]", + "properties" : { + "ipConfigurations" : [ + { + "name" : "pipConfig", + "properties" : { + "privateIPAllocationMethod" : "Dynamic", + "subnet" : { + "id" : "[variables('nodeSubnetRef')]" + } + } + } + ] + } + }, + { + "apiVersion" : "2018-06-01", + "type" : "Microsoft.Compute/virtualMachines", + "name" : "[variables('vmNames')[copyIndex()]]", + "location" : "[variables('location')]", + "tags" : { + "kubernetes.io-cluster-ffranzupi": "owned" + }, + "identity" : { + "type" : "userAssigned", + "userAssignedIdentities" : { + "[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/', variables('identityName'))]" : {} + } + }, + "dependsOn" : [ + "[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]" + ], + "properties" : { + "hardwareProfile" : { + "vmSize" : "[parameters('nodeVMSize')]" + }, + "osProfile" : { + "computerName" : "[variables('vmNames')[copyIndex()]]", + "adminUsername" : "capi", + "customData" : "[parameters('workerIgnition')]", + "linuxConfiguration" : { + "disablePasswordAuthentication" : true, + "ssh" : { + "publicKeys" : [ + { + "path" : "[variables('sshKeyPath')]", + "keyData" : "[parameters('sshKeyData')]" + } + ] + } + } + }, + "storageProfile" : { + "imageReference": { + "id": "[resourceId('Microsoft.Compute/images', variables('imageName'))]" + }, + "osDisk" : { + "name": "[concat(variables('vmNames')[copyIndex()],'_OSDisk')]", + "osType" : "Linux", + "createOption" : "FromImage", + "managedDisk": { + "storageAccountType": "Premium_LRS" + }, + "diskSizeGB": 128 + } + }, + "networkProfile" : { + "networkInterfaces" : [ + { + "id" : "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmNames')[copyIndex()], '-nic'))]", + "properties": { + "primary": true + } + } + ] + } + } + } + ] + } + } + } + ] +} +---- diff --git a/modules/installation-azure-create-dns-zones.adoc b/modules/installation-azure-create-dns-zones.adoc new file mode 100644 index 0000000000..b7df8b37fc --- /dev/null +++ b/modules/installation-azure-create-dns-zones.adoc @@ -0,0 +1,45 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-azure-create-dns-zones_{context}"] += Example for creating DNS zones + +DNS records are required for clusters that use user-provisioned infrastructure. +You should choose the DNS strategy that fits your scenario. + +For this example, link:https://docs.microsoft.com/en-us/azure/dns/dns-overview[Azure's DNS solution] +is used, so you will create a new public DNS zone for external (internet) +visibility and a private DNS zone for internal cluster resolution. + +[NOTE] +==== +The public DNS zone is not required to exist in the same resource group as the +cluster deployment and might already exist in your organization for the desired +base domain. 
If that is the case, you can skip creating the public DNS zone; be +sure the installation config you generated earlier reflects that scenario. +==== + +.Prerequisites + +* Configure an Azure account. + +* Generate the Ignition config files for your cluster. + +.Procedure + +. Create the new public DNS zone in the resource group exported in the +`BASE_DOMAIN_RESOURCE_GROUP` environment variable: ++ +---- +$ az network dns zone create -g ${BASE_DOMAIN_RESOURCE_GROUP} -n ${CLUSTER_NAME}.${BASE_DOMAIN} +---- ++ +You can skip this step if you are using a public DNS zone that already exists. + +. Create the private DNS zone in the same resource group as the rest of this +deployment: ++ +---- +$ az network private-dns zone create -g ${RESOURCE_GROUP} -n ${CLUSTER_NAME}.${BASE_DOMAIN} +---- diff --git a/modules/installation-azure-create-ingress-dns-records.adoc b/modules/installation-azure-create-ingress-dns-records.adoc new file mode 100644 index 0000000000..46be94419b --- /dev/null +++ b/modules/installation-azure-create-ingress-dns-records.adoc @@ -0,0 +1,70 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-azure-create-ingress-dns-records_{context}"] += Adding the Ingress DNS records + +If you removed the DNS Zone configuration when creating Kubernetes manifests and +generating Ignition configs, you must manually create DNS records that point at +the Ingress load balancer. You can create either a wildcard +`*.apps.{baseDomain}.` or specific records. You can use A, CNAME, and other +records per your requirements. + +.Prerequisites + +* You deployed an {product-title} cluster on Microsoft Azure by using infrastructure that you provisioned. +* Install the OpenShift Command-line Interface (CLI), commonly known as `oc`. +* Install the `jq` package. +* Install or update the link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-yum?view=azure-cli-latest[Azure CLI]. + +.Procedure + +. Confirm the Ingress router has created a load balancer and populated the +`EXTERNAL-IP` field: ++ +---- +$ oc -n openshift-ingress get service router-default +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +router-default LoadBalancer 172.30.20.10 35.130.120.110 80:32288/TCP,443:31215/TCP 20 +---- + +. Export the Ingress router IP as a variable: ++ +---- +$ export PUBLIC_IP_ROUTER=`oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}'` +---- + +. Add a `*.apps` record to the public DNS zone. + +.. If you are adding this cluster to a new public zone, run: ++ +---- +$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a ${PUBLIC_IP_ROUTER} --ttl 300 +---- + +.. If you are adding this cluster to an already existing public zone, run: ++ +---- +$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${BASE_DOMAIN} -n *.apps.${CLUSTER_NAME} -a ${PUBLIC_IP_ROUTER} --ttl 300 +---- + +. 
Add a `*.apps` record to the private DNS zone: ++ +---- +$ az network private-dns record-set a create -g ${RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps --ttl 300 +$ az network private-dns record-set a add-record -g ${RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a ${PUBLIC_IP_ROUTER} +---- + +If you prefer to add explicit domains instead of using a wildcard, you can +create entries for each of the cluster's current Routes: + +---- +$ oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{"\n"}{end}{end}' routes +oauth-openshift.apps.cluster.basedomain.com +console-openshift-console.apps.cluster.basedomain.com +downloads-openshift-console.apps.cluster.basedomain.com +alertmanager-main-openshift-monitoring.apps.cluster.basedomain.com +grafana-openshift-monitoring.apps.cluster.basedomain.com +prometheus-k8s-openshift-monitoring.apps.cluster.basedomain.com +---- diff --git a/modules/installation-azure-create-resource-group-and-identity.adoc b/modules/installation-azure-create-resource-group-and-identity.adoc new file mode 100644 index 0000000000..b2daa93b13 --- /dev/null +++ b/modules/installation-azure-create-resource-group-and-identity.adoc @@ -0,0 +1,49 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-azure-create-resource-group-and-identity_{context}"] += Creating the Azure resource group and identity + +You must create a Microsoft Azure link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups[resource group] and an identity for that resource group. +These are both used during the installation of your {product-title} cluster on +Azure. + +.Prerequisites + +* Configure an Azure account. + +* Generate the Ignition config files for your cluster. + +.Procedure + +. Create the resource group in a supported Azure region: ++ +---- +$ az group create --name ${RESOURCE_GROUP} --location ${AZURE_REGION} +---- + +. Create an Azure identity for the resource group: ++ +---- +$ az identity create -g ${RESOURCE_GROUP} -n ${INFRA_ID}-identity +---- ++ +This is used to grant the required access to Operators in your cluster. For +example, this allows the Ingress Operator to create a public IP and its load +balancer. You must assign the Azure identity to a role. + +. Grant the Contributor role to the Azure identity: + +.. Export the following variables required by the Azure role assignment: ++ +---- +$ export PRINCIPAL_ID=`az identity show -g ${RESOURCE_GROUP} -n ${INFRA_ID}-identity --query principalId --out tsv` +$ export RESOURCE_GROUP_ID=`az group show -g ${RESOURCE_GROUP} --query id --out tsv` +---- + +.. 
Assign the Contributor role to the identity: ++ +---- +$ az role assignment create --assignee "${PRINCIPAL_ID}" --role 'Contributor' --scope "${RESOURCE_GROUP_ID}" +---- diff --git a/modules/installation-azure-increasing-limits.adoc b/modules/installation-azure-increasing-limits.adoc index 9d5c3f5dce..d0d561593b 100644 --- a/modules/installation-azure-increasing-limits.adoc +++ b/modules/installation-azure-increasing-limits.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_azure/installing-azure-account.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc [id="installation-azure-increasing-limits_{context}"] = Increasing Azure account limits diff --git a/modules/installation-azure-limits.adoc b/modules/installation-azure-limits.adoc index d24e23b3c9..b4267e8d6b 100644 --- a/modules/installation-azure-limits.adoc +++ b/modules/installation-azure-limits.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_azure/installing-azure-account.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc [id="installation-azure-limits_{context}"] = Azure account limits @@ -27,9 +28,9 @@ ability to install and run {product-title} clusters. |Component |Number of components required by default| Default Azure limit |Description |vCPU -|34 +|40 |20 per region -|A default cluster requires 34 vCPUs, so you must increase the account limit. +|A default cluster requires 40 vCPUs, so you must increase the account limit. By default, each cluster creates the following instances: @@ -37,10 +38,11 @@ By default, each cluster creates the following instances: * Three control plane machines * Three compute machines -Because the bootstrap machine uses `Standard_D4s_v3` machines, which use 4 vCPUS, +Because the bootstrap machine uses `Standard_D4s_v3` machines, which use 4 vCPUs, the control plane machines use `Standard_D8s_v3` virtual -machines, which use 8 vCPUs, and the worker machines use `Standard_D2s_v3` -virtual machines, which use 2 vCPUs, a default cluster requires 34 vCPUs. +machines, which use 8 vCPUs, and the worker machines use `Standard_D4s_v3` +virtual machines, which use 4 vCPUs, a default cluster requires 40 vCPUs. +The bootstrap node VM, which uses 4 vCPUs, is used only during installation. 
To deploy more worker nodes, enable autoscaling, deploy large workloads, or use a different instance type, you must further increase the vCPU limit for your diff --git a/modules/installation-azure-network-config.adoc b/modules/installation-azure-network-config.adoc index 2982b6ba0a..8f7963aade 100644 --- a/modules/installation-azure-network-config.adoc +++ b/modules/installation-azure-network-config.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_azure/installing-azure-account.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc [id="installation-azure-network-config_{context}"] = Configuring a public DNS zone in Azure diff --git a/modules/installation-azure-permissions.adoc b/modules/installation-azure-permissions.adoc index 8bec21eef5..4157b7bc00 100644 --- a/modules/installation-azure-permissions.adoc +++ b/modules/installation-azure-permissions.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_azure/installing-azure-account.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc [id="installation-azure-permissions_{context}"] = Required Azure roles @@ -9,6 +10,6 @@ Your Microsoft Azure account must have the following roles for the subscription that you use: * `User Access Administrator` -To set roles on the Azure portal, see the +To set roles on the Azure portal, see the link:https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal[Manage access to Azure resources using RBAC and the Azure portal] in the Azure documentation. diff --git a/modules/installation-azure-regions.adoc b/modules/installation-azure-regions.adoc index f2c11487d0..c49314c367 100644 --- a/modules/installation-azure-regions.adoc +++ b/modules/installation-azure-regions.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_azure/installing-azure-account.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc [id="installation-azure-regions_{context}"] = Supported Azure regions diff --git a/modules/installation-azure-service-principal.adoc b/modules/installation-azure-service-principal.adoc index 4213baa0b4..83d955f288 100644 --- a/modules/installation-azure-service-principal.adoc +++ b/modules/installation-azure-service-principal.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_azure/installing-azure-account.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc [id="installation-azure-service-principal_{context}"] = Creating a service principal diff --git a/modules/installation-azure-user-infra-completing.adoc b/modules/installation-azure-user-infra-completing.adoc new file mode 100644 index 0000000000..a1e5b45e4a --- /dev/null +++ b/modules/installation-azure-user-infra-completing.adoc @@ -0,0 +1,32 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-azure-user-infra-completing_{context}"] += Completing an Azure installation on user-provisioned infrastructure + +After you start the {product-title} installation on Microsoft Azure +user-provisioned infrastructure, you can monitor the cluster events until the +cluster is ready. + +.Prerequisites + +* Deploy the bootstrap machine for an {product-title} cluster on user-provisioned Azure infrastructure. +* Install the `oc` CLI and log in. 
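While the installation completes, it can also be useful to watch the cluster Operators converge from the `oc` session mentioned in the prerequisites. This is a convenience only, not a required step; one example is:

----
$ watch -n5 oc get clusteroperators
----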
+
+.Procedure
+
+* Complete the cluster installation:
++
+----
+$ ./openshift-install --dir= wait-for install-complete <1>
+
+INFO Waiting up to 30m0s for the cluster to initialize...
+----
+<1> For ``, specify the path to the directory that you
+stored the installation files in.
++
+[IMPORTANT]
+====
+The Ignition config files that the installation program generates contain certificates that expire after 24 hours. You must keep the cluster running for 24 hours in a non-degraded state to ensure that the first certificate rotation has finished.
+====
diff --git a/modules/installation-azure-user-infra-deploying-rhcos.adoc b/modules/installation-azure-user-infra-deploying-rhcos.adoc
new file mode 100644
index 0000000000..53413fe99e
--- /dev/null
+++ b/modules/installation-azure-user-infra-deploying-rhcos.adoc
@@ -0,0 +1,42 @@
+// Module included in the following assemblies:
+//
+// * installing/installing_azure/installing-azure-user-infra.adoc
+
+[id="installation-azure-user-infra-deploying-rhcos_{context}"]
+= Deploying the {op-system} cluster image for the Azure infrastructure
+
+You must use a valid {op-system-first} image for Microsoft Azure for your
+{product-title} nodes.
+
+.Prerequisites
+
+* Configure an Azure account.
+
+* Generate the Ignition config files for your cluster.
+
+* Store the {op-system} virtual hard disk (VHD) cluster image in an Azure storage container.
+
+* Store the bootstrap ignition config file in an Azure storage container.
+
+.Procedure
+
+. Copy the template from the *ARM template for image storage* section of
+this topic and save it as `02_storage.json` in your cluster's installation directory. This template
+describes the image storage that your cluster requires.
+
+. Export the {op-system} VHD blob URL as a variable:
++
+----
+$ export VHD_BLOB_URL=`az storage blob url --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -c vhd -n "rhcos.vhd" -o tsv`
+----
+
+. Deploy the cluster image:
++
+----
+$ az deployment group create -g ${RESOURCE_GROUP} \
+  --template-file "/02_storage.json" \
+  --parameters vhdBlobURL="${VHD_BLOB_URL}" \ <1>
+  --parameters baseName="${INFRA_ID}" <2>
+----
+<1> The blob URL of the {op-system} VHD to be used to create master and worker machines.
+<2> The base name to be used in resource names; this is usually the cluster's Infra ID.
diff --git a/modules/installation-azure-user-infra-uploading-rhcos.adoc b/modules/installation-azure-user-infra-uploading-rhcos.adoc
new file mode 100644
index 0000000000..c9edf4b9cb
--- /dev/null
+++ b/modules/installation-azure-user-infra-uploading-rhcos.adoc
@@ -0,0 +1,81 @@
+// Module included in the following assemblies:
+//
+// * installing/installing_azure/installing-azure-user-infra.adoc
+
+[id="installation-azure-user-infra-uploading-rhcos_{context}"]
+= Uploading the {op-system} cluster image and bootstrap ignition config file
+
+The Azure client does not support deployments based on files existing locally;
+therefore, you must copy and store the {op-system} virtual hard disk (VHD)
+cluster image and bootstrap ignition config file in a storage container so they
+are accessible during deployment.
+
+.Prerequisites
+
+* Configure an Azure account.
+
+* Generate the Ignition config files for your cluster.
+
+.Procedure
+
+. 
Create an Azure storage account to store the VHD cluster image: ++ +---- +$ az storage account create -g ${RESOURCE_GROUP} --location ${AZURE_REGION} --name ${CLUSTER_NAME}sa --kind Storage --sku Standard_LRS +---- ++ +[WARNING] +==== +The Azure storage account name must be between 3 and 24 characters in length and +use numbers and lower-case letters only. If your `CLUSTER_NAME` variable does +not follow these restrictions, you must manually define the Azure storage +account name. For more information on Azure storage account name restrictions, +see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/error-storage-account-name[Resolve errors for storage account names] +in the Azure documentation. +==== + +. Export the storage account key as an environment variable: ++ +---- +$ export ACCOUNT_KEY=`az storage account keys list -g ${RESOURCE_GROUP} --account-name ${CLUSTER_NAME}sa --query "[0].value" -o tsv` +---- + +. Choose the {op-system} version to use and export the URL of its VHD to an +environment variable: ++ +---- +$ export VHD_URL=`curl -s https://raw.githubusercontent.com/openshift/installer/release-4.4/data/data/rhcos.json | jq -r .azure.url` +---- ++ +[IMPORTANT] +==== +The {op-system} images might not change with every release of {product-title}. +You must specify an image with the highest version that is +less than or equal to the {product-title} version that you install. Use the image version +that matches your {product-title} version if it is available. +==== + +. Copy the chosen VHD to a blob: ++ +---- +$ az storage container create --name vhd --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} +$ az storage blob copy start --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} --destination-blob "rhcos.vhd" --destination-container vhd --source-uri "${VHD_URL}" +---- ++ +To track the progress of the VHD copy task, run this script: ++ +---- +status="unknown" +while [ "$status" != "success" ] +do + status=`az storage blob show --container-name vhd --name "rhcos.vhd" --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -o tsv --query properties.copy.status` + echo $status +done +---- + +. Create a blob storage container and upload the generated `bootstrap.ign` file: ++ +---- +$ az storage container create --name files --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} --public-access blob +$ az storage blob upload --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -c "files" -f "/bootstrap.ign" -n "bootstrap.ign" +---- diff --git a/modules/installation-azure-user-infra-wait-for-bootstrap.adoc b/modules/installation-azure-user-infra-wait-for-bootstrap.adoc new file mode 100644 index 0000000000..5787268ec9 --- /dev/null +++ b/modules/installation-azure-user-infra-wait-for-bootstrap.adoc @@ -0,0 +1,50 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-azure-user-infra-wait-for-bootstrap_{context}"] += Wait for bootstrap completion and remove bootstrap resources in Azure + +After you create all of the required infrastructure in Microsoft Azure, wait for +the bootstrap process to complete on the machines that you provisioned by using +the Ignition config files that you generated with the installation program. + +.Prerequisites + +* Configure an Azure account. +* Generate the Ignition config files for your cluster. +* Create and configure a VNet and associated subnets in Azure. 
+* Create and configure networking and load balancers in Azure. +* Create control plane and compute roles. +* Create the bootstrap machine. +* Create the control plane machines. + +.Procedure + +. Change to the directory that contains the installation program and run the +following command: ++ +---- +$ ./openshift-install wait-for bootstrap-complete --dir= \ <1> + --log-level info <2> +---- +<1> For ``, specify the path to the directory that you +stored the installation files in. +<2> To view different installation details, specify `warn`, `debug`, or +`error` instead of `info`. ++ +If the command exits without a `FATAL` warning, your production control plane +has initialized. + +. Delete the bootstrap resources: ++ +---- +$ az network nsg rule delete -g ${RESOURCE_GROUP} --nsg-name ${INFRA_ID}-controlplane-nsg --name bootstrap_ssh_in +$ az vm stop -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap +$ az vm deallocate -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap +$ az vm delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap --yes +$ az disk delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap_OSDisk --no-wait --yes +$ az network nic delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap-nic --no-wait +$ az storage blob delete --account-key ${ACCOUNT_KEY} --account-name ${CLUSTER_NAME}sa --container-name files --name bootstrap.ign +$ az network public-ip delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap-ssh-pip +---- diff --git a/modules/installation-configure-proxy.adoc b/modules/installation-configure-proxy.adoc index fc9ad0839a..fd96116ef6 100644 --- a/modules/installation-configure-proxy.adoc +++ b/modules/installation-configure-proxy.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_aws/installing-aws-user-infra.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_gcp/installing-gcp-user-infra.adoc // * installing/installing_gcp/installing-restricted-networks-gcp.adoc // * installing/installing_bare_metal/installing-bare-metal.adoc diff --git a/modules/installation-creating-azure-bootstrap.adoc b/modules/installation-creating-azure-bootstrap.adoc new file mode 100644 index 0000000000..5461cd326b --- /dev/null +++ b/modules/installation-creating-azure-bootstrap.adoc @@ -0,0 +1,52 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-creating-azure-bootstrap_{context}"] += Creating the bootstrap machine in Azure + +You must create the bootstrap machine in Microsoft Azure to use during +{product-title} cluster initialization. One way to create this machine is to +modify the provided Azure Resource Manager (ARM) template. + +[NOTE] +==== +If you do not use the provided ARM template to create your bootstrap machine, +you must review the provided information and manually create the infrastructure. +If your cluster does not initialize correctly, you might have to contact Red Hat +support with your installation logs. +==== + +.Prerequisites + +* Configure an Azure account. +* Generate the Ignition config files for your cluster. +* Create and configure a VNet and associated subnets in Azure. +* Create and configure networking and load balancers in Azure. +* Create control plane and compute roles. + +.Procedure + +. Copy the template from the *ARM template for the bootstrap machine* section of +this topic and save it as `04_bootstrap.json` in your cluster's installation directory. 
This template
+describes the bootstrap machine that your cluster requires.
+
+. Export the following variables required by the bootstrap machine deployment:
++
+----
+$ export BOOTSTRAP_URL=`az storage blob url --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -c "files" -n "bootstrap.ign" -o tsv`
+$ export BOOTSTRAP_IGNITION=`jq -rcnM --arg v "2.2.0" --arg url ${BOOTSTRAP_URL} '{ignition:{version:$v,config:{replace:{source:$url}}}}' | base64 -w0`
+----
+
+. Create the deployment by using the `az` CLI:
++
+----
+$ az deployment group create -g ${RESOURCE_GROUP} \
+  --template-file "/04_bootstrap.json" \
+  --parameters bootstrapIgnition="${BOOTSTRAP_IGNITION}" \ <1>
+  --parameters sshKeyData="${SSH_KEY}" \ <2>
+  --parameters baseName="${INFRA_ID}" <3>
+----
+<1> The bootstrap ignition content for the bootstrap cluster.
+<2> The SSH RSA public key file as a string.
+<3> The base name to be used in resource names; this is usually the cluster's Infra ID.
diff --git a/modules/installation-creating-azure-control-plane.adoc b/modules/installation-creating-azure-control-plane.adoc
new file mode 100644
index 0000000000..1079aea5b2
--- /dev/null
+++ b/modules/installation-creating-azure-control-plane.adoc
@@ -0,0 +1,55 @@
+// Module included in the following assemblies:
+//
+// * installing/installing_azure/installing-azure-user-infra.adoc
+
+[id="installation-creating-azure-control-plane_{context}"]
+= Creating the control plane machines in Azure
+
+You must create the control plane machines in Microsoft Azure for your cluster
+to use. One way to create these machines is to modify the provided Azure
+Resource Manager (ARM) template.
+
+[NOTE]
+====
+If you do not use the provided ARM template to create your control plane
+machines, you must review the provided information and manually create the
+infrastructure. If your cluster does not initialize correctly, you might have to
+contact Red Hat support with your installation logs.
+====
+
+.Prerequisites
+
+* Configure an Azure account.
+* Generate the Ignition config files for your cluster.
+* Create and configure a VNet and associated subnets in Azure.
+* Create and configure networking and load balancers in Azure.
+* Create control plane and compute roles.
+* Create the bootstrap machine.
+
+.Procedure
+
+. Copy the template from the *ARM template for control plane machines*
+section of this topic and save it as `05_masters.json` in your cluster's installation directory.
+This template describes the control plane machines that your cluster requires.
+
+. Export the following variable needed by the control plane machine deployment:
++
+----
+$ export MASTER_IGNITION=`cat /master.ign | base64`
+----
+
+. Create the deployment by using the `az` CLI:
++
+----
+$ az deployment group create -g ${RESOURCE_GROUP} \
+  --template-file "/05_masters.json" \
+  --parameters masterIgnition="${MASTER_IGNITION}" \ <1>
+  --parameters sshKeyData="${SSH_KEY}" \ <2>
+  --parameters privateDNSZoneName="${CLUSTER_NAME}.${BASE_DOMAIN}" \ <3>
+  --parameters baseName="${INFRA_ID}" <4>
+----
+<1> The ignition content for the master nodes.
+<2> The SSH RSA public key file as a string.
+<3> The name of the private DNS zone to which the master nodes are attached.
+<4> The base name to be used in resource names; this is usually the cluster's Infra ID.
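The `az deployment group create` command returns when Azure accepts the deployment. If you want to confirm that the control plane virtual machines were provisioned before you continue, one option is to query the resource group with the `az` CLI. This is a minimal sketch, not part of the documented procedure; it assumes that the `RESOURCE_GROUP` variable is still exported and that you kept the template's default naming, in which the control plane VM names contain `master`.

----
# Optional: list the control plane VMs and their provisioning state.
$ az vm list -g ${RESOURCE_GROUP} \
    --query "[?contains(name, 'master')].{name:name, state:provisioningState}" \
    -o table
----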
+ diff --git a/modules/installation-creating-azure-dns.adoc b/modules/installation-creating-azure-dns.adoc new file mode 100644 index 0000000000..595ba830cc --- /dev/null +++ b/modules/installation-creating-azure-dns.adoc @@ -0,0 +1,65 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-creating-azure-dns_{context}"] += Creating networking and load balancing components in Azure + +You must configure networking and load balancing in Microsoft Azure for your +{product-title} cluster to use. One way to create these components is +to modify the provided Azure Resource Manager (ARM) template. + +[NOTE] +==== +If you do not use the provided ARM template to create your Azure infrastructure, +you must review the provided information and manually create the infrastructure. +If your cluster does not initialize correctly, you might have to contact Red Hat +support with your installation logs. +==== + +.Prerequisites + +* Configure an Azure account. +* Generate the Ignition config files for your cluster. +* Create and configure a VNet and associated subnets in Azure. + +.Procedure + +. Copy the template from the *ARM template for the network and load balancers* +section of this topic and save it as `03_infra.json` in your cluster's installation directory. This +template describes the networking and load balancing objects that your cluster +requires. + +. Create the deployment by using the `az` CLI: ++ +---- +$ az deployment group create -g ${RESOURCE_GROUP} \ + --template-file "/03_infra.json" \ + --parameters privateDNSZoneName="${CLUSTER_NAME}.${BASE_DOMAIN}" \ <1> + --parameters baseName="${INFRA_ID}"<2> +---- +<1> The name of the private DNS zone. +<2> The base name to be used in resource names; this is usually the cluster's Infra ID. + +. Create an `api` DNS record in the public zone for the API public load +balancer. The `${BASE_DOMAIN_RESOURCE_GROUP}` variable must point to the +resource group where the public DNS zone exists. + +.. Export the following variable: ++ +---- +$ export PUBLIC_IP=`az network public-ip list -g ${RESOURCE_GROUP} --query "[?name=='${INFRA_ID}-master-pip'] | [0].ipAddress" -o tsv` +---- + +.. Create the DNS record in a new public zone: ++ +---- +$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n api -a ${PUBLIC_IP} --ttl 60 +---- + +.. If you are adding the cluster to an existing public zone, you can create the DNS +record in it instead: ++ +---- +$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${BASE_DOMAIN} -n api.${CLUSTER_NAME} -a ${PUBLIC_IP} --ttl 60 +---- diff --git a/modules/installation-creating-azure-vnet.adoc b/modules/installation-creating-azure-vnet.adoc new file mode 100644 index 0000000000..d02aea015b --- /dev/null +++ b/modules/installation-creating-azure-vnet.adoc @@ -0,0 +1,45 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-creating-azure-vnet_{context}"] += Creating a VNet in Azure + +You must create a virtual network (VNet) in Microsoft Azure for your +{product-title} cluster to use. You can customize the VNet to meet your +requirements. One way to create the VNet is to modify the provided Azure +Resource Manager (ARM) template. 
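Returning briefly to the `api` DNS record created in the preceding networking and load balancing procedure: before you continue, you can check that the record resolves to the load balancer's public IP address. This lookup is not part of the documented procedure; it is a minimal sketch that assumes the `CLUSTER_NAME`, `BASE_DOMAIN`, and `PUBLIC_IP` variables exported earlier and that the record's 60-second TTL has elapsed.

----
# Optional: the two values should match once the record has propagated.
$ dig +short api.${CLUSTER_NAME}.${BASE_DOMAIN}
$ echo ${PUBLIC_IP}
----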
+
+[NOTE]
+====
+If you do not use the provided ARM template to create your Azure infrastructure,
+you must review the provided information and manually create the infrastructure.
+If your cluster does not initialize correctly, you might have to contact Red Hat
+support with your installation logs.
+====
+
+.Prerequisites
+
+* Configure an Azure account.
+* Generate the Ignition config files for your cluster.
+
+.Procedure
+
+. Copy the template from the *ARM template for the VNet* section of this topic
+and save it as `01_vnet.json` in your cluster's installation directory. This template describes the
+VNet that your cluster requires.
+
+. Create the deployment by using the `az` CLI:
++
+----
+$ az deployment group create -g ${RESOURCE_GROUP} \
+  --template-file "/01_vnet.json" \
+  --parameters baseName="${INFRA_ID}" <1>
+----
+<1> The base name to be used in resource names; this is usually the cluster's Infra ID.
+
+. Link the VNet to the private DNS zone:
++
+----
+$ az network private-dns link vnet create -g ${RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n ${INFRA_ID}-network-link -v "${INFRA_ID}-vnet" -e false
+----
diff --git a/modules/installation-creating-azure-worker.adoc b/modules/installation-creating-azure-worker.adoc
new file mode 100644
index 0000000000..50631adc19
--- /dev/null
+++ b/modules/installation-creating-azure-worker.adoc
@@ -0,0 +1,58 @@
+// Module included in the following assemblies:
+//
+// * installing/installing_azure/installing-azure-user-infra.adoc
+
+[id="installation-creating-azure-worker_{context}"]
+= Creating additional worker machines in Azure
+
+You can create worker machines in Microsoft Azure for your cluster
+to use by launching individual instances discretely or by automated processes
+outside the cluster, such as virtual machine scale sets. You can also take advantage of
+the built-in cluster scaling mechanisms and the machine API in {product-title}.
+
+In this example, you manually launch one instance by using the Azure Resource
+Manager (ARM) template. You can launch additional instances by including
+additional resources of this type in the `06_workers.json` file.
+
+[NOTE]
+====
+If you do not use the provided ARM template to create your worker machines, you
+must review the provided information and manually create the infrastructure. If
+your cluster does not initialize correctly, you might have to contact Red Hat
+support with your installation logs.
+====
+
+.Prerequisites
+
+* Configure an Azure account.
+* Generate the Ignition config files for your cluster.
+* Create and configure a VNet and associated subnets in Azure.
+* Create and configure networking and load balancers in Azure.
+* Create control plane and compute roles.
+* Create the bootstrap machine.
+* Create the control plane machines.
+
+.Procedure
+
+. Copy the template from the *ARM template for worker machines*
+section of this topic and save it as `06_workers.json` in your cluster's installation directory. This
+template describes the worker machines that your cluster requires.
+
+. Export the following variable needed by the worker machine deployment:
++
+----
+$ export WORKER_IGNITION=`cat /worker.ign | base64`
+----
+
+. Create the deployment by using the `az` CLI:
++
+----
+$ az deployment group create -g ${RESOURCE_GROUP} \
+  --template-file "/06_workers.json" \
+  --parameters workerIgnition="${WORKER_IGNITION}" \ <1>
+  --parameters sshKeyData="${SSH_KEY}" \ <2>
+  --parameters baseName="${INFRA_ID}" <3>
+----
+<1> The ignition content for the worker nodes.
+<2> The SSH RSA public key file as a string. +<3> The base name to be used in resource names; this is usually the cluster's Infra ID. diff --git a/modules/installation-extracting-infraid.adoc b/modules/installation-extracting-infraid.adoc index d99100c640..241c5a40fa 100644 --- a/modules/installation-extracting-infraid.adoc +++ b/modules/installation-extracting-infraid.adoc @@ -2,6 +2,7 @@ // // * installing/installing_aws/installing-aws-user-infra.adoc // * installing/installing_aws/installing-restricted-networks-aws.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_gcp/installing-gcp-user-infra.adoc // * installing/installing_gcp/installing-restricted-networks-gcp.adoc @@ -9,30 +10,50 @@ ifeval::["{context}" == "installing-aws-user-infra"] :cp-first: Amazon Web Services :cp: AWS :cp-template: CloudFormation +:aws: endif::[] ifeval::["{context}" == "installing-restricted-networks-aws"] :cp-first: Amazon Web Services :cp: AWS :cp-template: CloudFormation +:aws: +endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:cp-first: Microsoft Azure +:cp: Azure +:cp-template-first: Azure Resource Manager +:cp-template: ARM +:azure: endif::[] ifeval::["{context}" == "installing-gcp-user-infra"] :cp-first: Google Cloud Platform :cp: GCP :cp-template: Deployment Manager +:gcp: endif::[] ifeval::["{context}" == "installing-restricted-networks-gcp"] :cp-first: Google Cloud Platform :cp: GCP :cp-template: Deployment Manager +:gcp: endif::[] [id="installation-extracting-infraid_{context}"] = Extracting the infrastructure name +ifdef::aws,gcp[] The Ignition configs contain a unique cluster identifier that you can use to uniquely identify your cluster in {cp-first} ({cp}). The provided {cp-template} templates contain references to this infrastructure name, so you must extract it. +endif::aws,gcp[] + +ifdef::azure[] +The Ignition configs contain a unique cluster identifier that you can use to +uniquely identify your cluster in {cp-first}. The provided {cp-template-first} ({cp-template}) +templates contain references to this infrastructure name, so you must extract +it. 
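If you prefer not to parse the manifest files, the installation program also records this identifier in the `metadata.json` file that it writes to the installation directory. The following is a minimal sketch, assuming `jq` is installed and that you run it from the installation directory; the procedure that follows remains the documented method.

----
# Optional: read the infrastructure name from metadata.json with jq.
$ jq -r .infraID metadata.json
----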
+endif::azure[] .Prerequisites @@ -57,19 +78,30 @@ ifeval::["{context}" == "installing-aws-user-infra"] :!cp-first: :!cp: :!cp-template: +:!aws: endif::[] ifeval::["{context}" == "installing-restricted-networks-aws"] :!cp-first: :!cp: :!cp-template: +:!aws: +endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:!cp-first: +:!cp: +:!cp-template-first: +:!cp-template: +:!azure: endif::[] ifeval::["{context}" == "installing-gcp-user-infra"] :!cp-first: :!cp: :!cp-template: +:!gcp: endif::[] ifeval::["{context}" == "installing-restricted-networks-gcp"] :!cp-first: :!cp: :!cp-template: +:!gcp: endif::[] diff --git a/modules/installation-initializing.adoc b/modules/installation-initializing.adoc index 2383757b33..14973bfa20 100644 --- a/modules/installation-initializing.adoc +++ b/modules/installation-initializing.adoc @@ -8,6 +8,7 @@ // * installing/installing_azure/installing-azure-network-customizations // * installing/installing_azure/installing-azure-private.adoc // * installing/installing_azure/installing-azure-vnet.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_gcp/installing-gcp-customizations.adoc // * installing/installing_gcp/installing-gcp-private.adoc // * installing/installing_gcp/installing-gcp-network-customizations.adoc @@ -37,6 +38,9 @@ endif::[] ifeval::["{context}" == "installing-azure-vnet"] :azure: endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:azure: +endif::[] ifeval::["{context}" == "installing-gcp-customizations"] :gcp: endif::[] @@ -222,6 +226,9 @@ endif::[] ifeval::["{context}" == "installing-azure-vnet"] :!azure: endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:!azure: +endif::[] ifeval::["{context}" == "installing-gcp-customizations"] :!gcp: endif::[] diff --git a/modules/installation-obtaining-installer.adoc b/modules/installation-obtaining-installer.adoc index 6118811707..cd59c17318 100644 --- a/modules/installation-obtaining-installer.adoc +++ b/modules/installation-obtaining-installer.adoc @@ -10,6 +10,7 @@ // * installing/installing_azure/installing-azure-default.adoc // * installing/installing_azure/installing-azure-private.adoc // * installing/installing_azure/installing-azure-vnet.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_bare_metal/installing-bare-metal.adoc // * installing/installing_gcp/installing-gcp-customizations.adoc // * installing/installing_gcp/installing-gcp-private.adoc diff --git a/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc b/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc new file mode 100644 index 0000000000..7845fb38a9 --- /dev/null +++ b/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc @@ -0,0 +1,55 @@ +// Module included in the following assemblies: +// +// * installing/installing_azure/installing-azure-user-infra.adoc + +[id="installation-user-infra-exporting-common-variables-arm-templates_{context}"] += Exporting common variables for ARM templates + +You must export a common set of variables that are used with the provided Azure +Resource Manager (ARM) templates used to assist in completing a user-provided +infrastructure install on Microsoft Azure. + +[NOTE] +==== +Specific ARM templates can also require additional exported variables, which are +detailed in their related procedures. 
+==== + +.Prerequisites + +* Obtain the {product-title} installation program and the pull secret for your cluster. + +.Procedure + +. Export common variables found in the `install-config.yaml` to be used by the +provided ARM templates: ++ +---- +$ export CLUSTER_NAME=<1> +$ export AZURE_REGION=<2> +$ export SSH_KEY=<3> +$ export BASE_DOMAIN=<4> +$ export BASE_DOMAIN_RESOURCE_GROUP=<5> +---- +<1> The value of the `.metadata.name` attribute from the `install-config.yaml` file. +<2> The region to deploy the cluster into, for example `centralus`. This is the value of the `.platform.azure.region` attribute from the `install-config.yaml` file. +<3> The SSH RSA public key file as a string. You must enclose the SSH key in quotes since it contains spaces. This is the value of the `.sshKey` attribute from the `install-config.yaml` file. +<4> The base domain to deploy the cluster to. The base domain corresponds to the public DNS zone that you created for your cluster. This is the value of the `.baseDomain` attribute from the `install-config.yaml` file. +<5> The resource group where the public DNS zone exists. This is the value of the `.platform.azure.baseDomainResourceGroupName` attribute from the `install-config.yaml` file. ++ +For example: ++ +---- +$ export CLUSTER_NAME=test-cluster +$ export AZURE_REGION=centralus +$ export SSH_KEY="ssh-rsa xxx/xxx/xxx= user@email.com" +$ export BASE_DOMAIN=example.com +$ export BASE_DOMAIN_RESOURCE_GROUP=ocp-cluster +---- + +. Export the kubeadmin credentials: ++ +---- +$ export KUBECONFIG=/auth/kubeconfig <1> +---- +<1> For ``, specify the path to the directory that you stored the installation files in. diff --git a/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc b/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc index 0a1196375e..f90d6864e2 100644 --- a/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc +++ b/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_aws/installing-aws-user-infra.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_bare_metal/installing-bare-metal.adoc // * installing/installing_gcp/installing-gcp-user-infra.adoc // * installing/installing_gcp/installing-restricted-networks-gcp.adoc @@ -18,6 +19,10 @@ ifeval::["{context}" == "installing-restricted-networks-aws"] :aws: :restricted: endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:azure: +:azure-user-infra: +endif::[] ifeval::["{context}" == "installing-restricted-networks-vsphere"] :restricted: endif::[] @@ -69,37 +74,37 @@ contains the `install-config.yaml` file you created. Because you create your own compute machines later in the installation process, you can safely ignore this warning. -ifdef::aws,gcp[] +ifdef::aws,azure,gcp[] . Remove the Kubernetes manifest files that define the control plane machines: + ---- -$ rm -f openshift/99_openshift-cluster-api_master-machines-*.yaml +$ rm -f /openshift/99_openshift-cluster-api_master-machines-*.yaml ---- + By removing these files, you prevent the cluster from automatically generating control plane machines. -endif::aws,gcp[] +endif::aws,azure,gcp[] ifdef::gcp[] . Optional: If you do not want the cluster to provision compute machines, remove the Kubernetes manifest files that define the worker machines: endif::gcp[] -ifdef::aws[] +ifdef::aws,azure[] . 
Remove the Kubernetes manifest files that define the worker machines:
-endif::aws[]
-ifdef::aws,gcp[]
+endif::aws,azure[]
+ifdef::aws,azure,gcp[]
+
----
-$ rm -f openshift/99_openshift-cluster-api_worker-machineset-*.yaml
+$ rm -f /openshift/99_openshift-cluster-api_worker-machineset-*.yaml
----
+
Because you create and manage the worker machines yourself, you do not need to
initialize these machines.
-endif::aws,gcp[]
+endif::aws,azure,gcp[]

-. Modify the `manifests/cluster-scheduler-02-config.yml` Kubernetes manifest file to prevent Pods from being scheduled on the control plane machines:
+. Modify the `/manifests/cluster-scheduler-02-config.yml` Kubernetes manifest file to prevent Pods from being scheduled on the control plane machines:
+
--
-.. Open the `manifests/cluster-scheduler-02-config.yml` file.
+.. Open the `/manifests/cluster-scheduler-02-config.yml` file.
.. Locate the `mastersSchedulable` parameter and set its value to `False`.
.. Save and exit the file.
--
@@ -109,11 +114,11 @@ endif::aws,gcp[]
Currently, due to a link:https://github.com/kubernetes/kubernetes/issues/65618[Kubernetes limitation], router Pods running on control plane machines will not be reachable by the ingress load balancer. This step might not be required in a future minor version of {product-title}.
====
-ifdef::gcp,aws[]
+ifdef::gcp,aws,azure[]
. Optional: If you do not want
link:https://github.com/openshift/cluster-ingress-operator[the Ingress Operator]
to create DNS records on your behalf, remove the `privateZone` and `publicZone`
-sections from the `manifests/cluster-dns-02-config.yml` DNS configuration file:
+sections from the `/manifests/cluster-dns-02-config.yml` DNS configuration file:
+
[source,yaml]
----
@@ -133,7 +138,20 @@ status: {}
<1> Remove these sections completely.
+
If you do so, you must add ingress DNS records manually in a later step.
-endif::gcp,aws[]
+endif::gcp,aws,azure[]
+
+ifdef::azure-user-infra[]
+. When configuring Azure on user-provisioned infrastructure, you must export
+some common variables defined in the manifest files to use later in the Azure
+Resource Manager (ARM) templates:
++
+----
+$ export INFRA_ID=<1>
+$ export RESOURCE_GROUP=<2>
+----
+<1> The {product-title} cluster has been assigned an identifier (`INFRA_ID`) in the form of `-`. This value is used as the base name for most resources created by the provided ARM templates. This is the value of the `.status.infrastructureName` attribute from the `manifests/cluster-infrastructure-02-config.yml` file.
+<2> All resources created in this Azure deployment exist as part of a link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups[resource group]. The resource group name is also based on the `INFRA_ID`, in the form of `--rg`. This is the value of the `.status.platformStatus.azure.resourceGroupName` attribute from the `manifests/cluster-infrastructure-02-config.yml` file.
+endif::azure-user-infra[]

. 
Obtain the Ignition config files: + @@ -162,6 +180,10 @@ endif::[] ifeval::["{context}" == "installing-aws-user-infra"] :!aws: endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:!azure: +:!azure-user-infra: +endif::[] ifeval::["{context}" == "installing-gcp-user-infra"] :!gcp: endif::[] diff --git a/modules/installation-user-infra-generate.adoc b/modules/installation-user-infra-generate.adoc index f3c73205a3..a01e183302 100644 --- a/modules/installation-user-infra-generate.adoc +++ b/modules/installation-user-infra-generate.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_aws/installing-aws-user-infra.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_gcp/installing-gcp-user-infra.adoc // * installing/installing_aws/installing-restricted-networks-aws.adoc // * installing/installing_gcp/installing-restricted-networks-gcp.adoc @@ -9,43 +10,70 @@ ifeval::["{context}" == "installing-restricted-networks-aws"] :restricted: :cp-first: Amazon Web Services :cp: AWS +:aws: endif::[] ifeval::["{context}" == "installing-aws-user-infra"] :cp-first: Amazon Web Services :cp: AWS +:aws: +endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:cp-first: Microsoft Azure +:cp: Azure +:azure: endif::[] ifeval::["{context}" == "installing-gcp-user-infra"] :cp-first: Google Cloud Platform :cp: GCP +:gcp: endif::[] ifeval::["{context}" == "installing-restricted-networks-gcp"] :cp-first: Google Cloud Platform :cp: GCP +:gcp: endif::[] [id="installation-user-infra-generate_{context}"] = Creating the installation files for {cp} +ifdef::azure[] +To install {product-title} on {cp-first} using user-provisioned +infrastructure, you must generate the files that the installation +program needs to deploy your cluster and modify them so that the cluster creates +only the machines that it will use. You generate and customize the +`install-config.yaml` file, Kubernetes manifests, and Ignition config files. +endif::azure[] +ifdef::aws,gcp[] To install {product-title} on {cp-first} ({cp}) using user-provisioned infrastructure, you must generate the files that the installation program needs to deploy your cluster and modify them so that the cluster creates only the machines that it will use. You generate and customize the `install-config.yaml` file, Kubernetes manifests, and Ignition config files. 
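For orientation, the three kinds of files described here are produced by three `openshift-install` invocations, run in order against the same asset directory. This is a minimal sketch, not a substitute for the detailed procedures in this assembly; the `INSTALL_DIR` variable is a hypothetical stand-in for your installation directory, and you customize the generated files between each step as described in the related modules.

----
# INSTALL_DIR is a hypothetical stand-in for your installation directory.
$ ./openshift-install create install-config --dir=${INSTALL_DIR}
$ ./openshift-install create manifests --dir=${INSTALL_DIR}
$ ./openshift-install create ignition-configs --dir=${INSTALL_DIR}
----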
+endif::aws,gcp[] ifeval::["{context}" == "installing-restricted-networks-aws"] :!restricted: :!cp-first: :!cp: +:!aws: endif::[] ifeval::["{context}" == "installing-aws-user-infra"] :!cp-first: :!cp: +:!aws: +endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:!cp-first: +:!cp: +:!azure: endif::[] ifeval::["{context}" == "installing-gcp-user-infra"] :!cp-first: :!cp: +:!gcp: endif::[] ifeval::["{context}" == "installing-restricted-networks-gcp"] :!cp-first: :!cp: +:!gcp: endif::[] diff --git a/modules/ssh-agent-using.adoc b/modules/ssh-agent-using.adoc index 28ae785db4..54194288cd 100644 --- a/modules/ssh-agent-using.adoc +++ b/modules/ssh-agent-using.adoc @@ -10,6 +10,7 @@ // * installing/installing_azure/installing-azure-default.adoc // * installing/installing_azure/installing-azure-private.adoc // * installing/installing_azure/installing-azure-vnet.adoc +// * installing/installing_azure/installing-azure-user-infra.adoc // * installing/installing_bare_metal/installing-bare-metal.adoc // * installing/installing_gcp/installing-gcp-customizations.adoc // * installing/installing_gcp/installing-gcp-private.adoc @@ -42,6 +43,9 @@ endif::[] ifeval::["{context}" == "installing-aws-user-infra"] :user-infra: endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:user-infra: +endif::[] ifeval::["{context}" == "installing-openstack-installer-custom"] :osp: endif::[] @@ -143,6 +147,9 @@ endif::[] ifeval::["{context}" == "installing-aws-user-infra"] :!user-infra: endif::[] +ifeval::["{context}" == "installing-azure-user-infra"] +:!user-infra: +endif::[] ifeval::["{context}" == "installing-openstack-installer-custom"] :!osp: endif::[]