Remove pkg/terraform
Removes pkg/terraform, which is no longer needed.
@@ -1,94 +0,0 @@
package terraform

import (
    "regexp"

    "github.com/openshift/installer/pkg/diagnostics"
)

// diagnoseApplyError accepts an error from terraform runs and tries to diagnose the
// underlying cause.
func diagnoseApplyError(err error) error {
    if err == nil {
        return nil
    }

    message := err.Error()
    for _, cand := range conditions {
        if cand.match.MatchString(message) {
            return &diagnostics.Err{
                Source:  "Infrastructure Provider",
                Reason:  cand.reason,
                Message: cand.message,
            }
        }
    }

    return err
}

type condition struct {
    match *regexp.Regexp

    reason  string
    message string
}

// conditions is a list of matches for the error string from terraform.
// Specific matches are at the top, generic matches at the bottom.
var conditions = []condition{{
    match: regexp.MustCompile(`Error: Error creating Blob .*: Error copy/waiting`),

    reason:  "Timeout",
    message: `Copying the VHD to the user environment was too slow, and the timeout was reached.`,
}, {
    match: regexp.MustCompile(`Error: Error Creating/Updating Subnet .*: network.SubnetsClient#CreateOrUpdate: .* Code="AnotherOperationInProgress" Message="Another operation on this or dependent resource is in progress`),

    reason:  "AzureMultiOperationFailure",
    message: `Creating Subnets failed because Azure could not process multiple operations.`,
}, {
    match: regexp.MustCompile(`Error: Error Creating/Updating Public IP .*: network.PublicIPAddressesClient#CreateOrUpdate: .* Code="PublicIPCountLimitReached" Message="Cannot create more than .* public IP addresses for this subscription in this region`),

    reason:  "AzureQuotaLimitExceeded",
    message: `Service limits exceeded for Public IPs in the subscription for the region. Requesting an increase in quota should fix the error.`,
}, {
    match: regexp.MustCompile(`Error: compute\.VirtualMachinesClient#CreateOrUpdate: .* Code="OperationNotAllowed" Message="Operation could not be completed as it results in exceeding approved Total Regional Cores quota`),

    reason:  "AzureQuotaLimitExceeded",
    message: `Service limits exceeded for Virtual Machine cores in the subscription for the region. Requesting an increase in quota should fix the error.`,
}, {
    match: regexp.MustCompile(`Error: Code="OSProvisioningTimedOut"`),

    reason:  "AzureVirtualMachineFailure",
    message: `Some virtual machines failed to provision in the allotted time. Virtual machines can fail to provision if the bootstrap virtual machine has failing services.`,
}, {
    match: regexp.MustCompile(`Status=404 Code="ResourceGroupNotFound"`),

    reason:  "AzureEventualConsistencyFailure",
    message: `Failed to find a resource that was recently created, usually caused by Azure's eventual consistency delays.`,
}, {
    match: regexp.MustCompile(`Error: Error applying IAM policy to project .*: Too many conflicts`),

    reason:  "GCPTooManyIAMUpdatesInFlight",
    message: `There are a lot of IAM updates to the project in flight. Failed after reaching the limit of read-modify-write conflict backoffs.`,
}, {
    match: regexp.MustCompile(`Error: .*: googleapi: Error 503: .*, backendError`),

    reason:  "GCPBackendInternalError",
    message: `GCP is experiencing backend service interruptions. Please try again or contact Google Support.`,
}, {
    match: regexp.MustCompile(`Error: Error waiting for instance to create: Internal error`),

    reason:  "GCPComputeBackendTimeout",
    message: `GCP is experiencing backend service interruptions; the compute instance failed to create in a reasonable time.`,
}, {
    match: regexp.MustCompile(`Error: could not contact Ironic API: timeout reached`),

    reason:  "BaremetalIronicAPITimeout",
    message: `Unable to reach the provisioning service. This failure can be caused by incorrect network/proxy settings, inability to download the machine operating system images, or other misconfiguration. Please check access to the bootstrap host, and for any failing services.`,
}, {
    match: regexp.MustCompile(`Error: could not inspect: could not inspect node, node is currently 'inspect failed', last error was 'timeout reached while inspecting the node'`),

    reason:  "BaremetalIronicInspectTimeout",
    message: `Timed out waiting for node inspection to complete. Please check the console on the host for more details.`,
}}
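// Illustrative note (inferred from the test expectations below, not part of
// the original file): a matched condition surfaces through
// (*diagnostics.Err).Error() in the form
//
//   error(Reason) from Infrastructure Provider: Message
//
// e.g. error(Timeout) from Infrastructure Provider: Copying the VHD to the
// user environment was too slow, and the timeout was reached.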
@@ -1,124 +0,0 @@
package terraform

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestDiagnoseApplyError(t *testing.T) {
    cases := []struct {
        input string
        err   string
    }{{
        input: `Error: Error creating Blob "rhcoskltwa.vhd" (Container "vhd" / Account "clusterkltwa"): Error copy/waiting:
on ../tmp/openshift-install-348626978/main.tf line 169, in resource "azurerm_storage_blob" "rhcos_image":"
169: resource "azurerm_storage_blob" "rhcos_image" {
`,
        err: `error\(Timeout\) from Infrastructure Provider: Copying the VHD to the user environment was too slow, and the timeout was reached\.`,
    }, {
        input: `Error: Error Creating/Updating Subnet "xxxx-master-subnet" (Virtual Network "xxxx-vnet" / Resource Group "xxxx-rg"): network.SubnetsClient#CreateOrUpdate: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status=<nil> Code="AnotherOperationInProgress" Message="Another operation on this or dependent resource is in progress. To retrieve status of the operation use uri: https://management.azure.com/subscriptions/d38f1e38-4bed-438e-b227-833f997adf6a/providers/Microsoft.Network/locations/eastus2/operations/62c8a417-7168-464f-83e6-96912bd6b30a?api-version=2019-09-01." Details=[]

on ../tmp/openshift-install-513947104/vnet/vnet.tf line 10, in resource "azurerm_subnet" "master_subnet":"
10: resource "azurerm_subnet" "master_subnet" {
`,
        err: `error\(AzureMultiOperationFailure\) from Infrastructure Provider: Creating Subnets failed because Azure could not process multiple operations\.`,
    }, {
        input: `Error: Error Creating/Updating Public IP "xxxx-bootstrap-pip-v4" (Resource Group "xxxx-rg"): network.PublicIPAddressesClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="PublicIPCountLimitReached" Message="Cannot create more than 50 public IP addresses for this subscription in this region." Details=[]

on ../tmp/openshift-install-172932975/bootstrap/main.tf line 65, in resource "azurerm_public_ip" "bootstrap_public_ip_v4":
65: resource "azurerm_public_ip" "bootstrap_public_ip_v4" {
`,

        err: `error\(AzureQuotaLimitExceeded\) from Infrastructure Provider: Service limits exceeded for Public IPs in the subscription for the region\. Requesting an increase in quota should fix the error\.`,
    }, {
        input: `Error: Code="OSProvisioningTimedOut" Message="OS Provisioning for VM 'xxxx-master-2' did not finish in the allotted time. The VM may still finish provisioning successfully. Please check provisioning state later. Also, make sure the image has been properly prepared (generalized).\\r\\n * Instructions for Windows: https://azure.microsoft.com/documentation/articles/virtual-machines-windows-upload-image/ \\r\\n * Instructions for Linux: https://azure.microsoft.com/documentation/articles/virtual-machines-linux-capture-image/ "

on ../tmp/openshift-install-172932975/master/master.tf line 81, in resource "azurerm_virtual_machine" "master":
81: resource "azurerm_virtual_machine" "master" {
`,

        err: `error\(AzureVirtualMachineFailure\) from Infrastructure Provider: Some virtual machines failed to provision in the allotted time`,
    }, {
        input: `
Error: Error waiting for instance to create: Internal error. Please try again or contact Google Support. (Code: '8712799794455203922')


on ../tmp/openshift-install-910996711/master/main.tf line 31, in resource "google_compute_instance" "master":
31: resource "google_compute_instance" "master" {
`,

        err: `error\(GCPComputeBackendTimeout\) from Infrastructure Provider: GCP is experiencing backend service interruptions; the compute instance failed to create in a reasonable time\.`,
    }, {
        input: `Error: Error reading Service Account "projects/project-id/serviceAccounts/xxxx-m@project-id.iam.gserviceaccount.com": googleapi: Error 503: The service is currently unavailable., backendError`,

        err: `error\(GCPBackendInternalError\) from Infrastructure Provider: GCP is experiencing backend service interruptions\. Please try again or contact Google Support`,
    }, {
        input: `
Error: Error adding instances to InstanceGroup: googleapi: Error 503: Internal error. Please try again or contact Google Support. (Code: 'xxxx'), backendError

on ../tmp/openshift-install-267295217/bootstrap/main.tf line 87, in resource "google_compute_instance_group" "bootstrap":
87: resource "google_compute_instance_group" "bootstrap" {
`,

        err: `error\(GCPBackendInternalError\) from Infrastructure Provider: GCP is experiencing backend service interruptions\. Please try again or contact Google Support`,
    }, {
        input: `
Error: Error applying IAM policy to project "project-id": Too many conflicts. Latest error: Error setting IAM policy for project "project-id": googleapi: Error 409: There were concurrent policy changes. Please retry the whole read-modify-write with exponential backoff., aborted

on ../tmp/openshift-install-392130810/master/main.tf line 26, in resource "google_project_iam_member" "master-service-account-user":
26: resource "google_project_iam_member" "master-service-account-user" {
`,

        err: `error\(GCPTooManyIAMUpdatesInFlight\) from Infrastructure Provider: There are a lot of IAM updates to the project in flight\. Failed after reaching the limit of read-modify-write conflict backoffs\.`,
    }, {
        input: `
Error: Error retrieving resource group: resources.GroupsClient#Get: Failure responding to request: StatusCode=404 -- Original Error: autorest/azure: Service returned an error. Status=404 Code="ResourceGroupNotFound" Message="Resource group 'xxxxx-rg' could not be found."

on ../tmp/openshift-install-424775273/main.tf line 124, in resource "azurerm_resource_group" "main":
124: resource "azurerm_resource_group" "main" {
`,

        err: `error\(AzureEventualConsistencyFailure\) from Infrastructure Provider: Failed to find a resource that was recently created, usually caused by Azure's eventual consistency delays\.`,
    }, {
        input: `
Error: compute.VirtualMachinesClient#CreateOrUpdate: Failure sending request: StatusCode=0 -- Original Error: autorest/azure: Service returned an error. Status=<nil> Code="OperationNotAllowed" Message="Operation could not be completed as it results in exceeding approved Total Regional Cores quota. Additional details - Deployment Model: Resource Manager, Location: centralus, Current Limit: 200, Current Usage: 198, Additional Required: 8, (Minimum) New Limit Required: 206. Submit a request for Quota increase at https://aka.ms/ProdportalCRP/?#create/Microsoft.Support/Parameters/%7B%22subId%22:%225f675811-04fa-483f-9709-ffd8a9da03f0%22,%22pesId%22:%2206bfd9d3-516b-d5c6-5802-169c800dec89%22,%22supportTopicId%22:%22e12e3d1d-7fa0-af33-c6d0-3c50df9658a3%22%7D by specifying parameters listed in the ‘Details’ section for deployment to succeed. Please read more about quota limits at https://docs.microsoft.com/en-us/azure/azure-supportability/regional-quota-requests."

on ../../../../tmp/openshift-install-941329162/master/master.tf line 81, in resource "azurerm_virtual_machine" "master":
81: resource "azurerm_virtual_machine" "master" {
`,

        err: `error\(AzureQuotaLimitExceeded\) from Infrastructure Provider: Service limits exceeded for Virtual Machine cores in the subscription for the region\. Requesting an increase in quota should fix the error\.`,
    }, {
        input: `
Error: could not contact Ironic API: timeout reached

on ../../../../tmp/openshift-install-431515935/masters/main.tf line 1, in resource "ironic_node_v1" "openshift-master-host":
1: resource "ironic_node_v1" "openshift-master-host" {
`,

        err: `error\(BaremetalIronicAPITimeout\) from Infrastructure Provider: Unable to reach the provisioning service\. This failure can be caused by incorrect network/proxy settings, inability to download the machine operating system images, or other misconfiguration\. Please check access to the bootstrap host, and for any failing services\.`,
    }, {
        input: `
Error: could not inspect: could not inspect node, node is currently 'inspect failed', last error was 'timeout reached while inspecting the node'

on ../../tmp/openshift-install-229338618/masters/main.tf line 1, in resource "ironic_node_v1" "openshift-master-host":
1: resource "ironic_node_v1" "openshift-master-host" {
`,

        err: `error\(BaremetalIronicInspectTimeout\) from Infrastructure Provider: Timed out waiting for node inspection to complete\. Please check the console on the host for more details\.`,
    }}

    for _, test := range cases {
        t.Run("", func(t *testing.T) {
            inError := errors.New(test.input)
            err := diagnoseApplyError(inError)
            if test.err == "" {
                assert.Equal(t, err, inError)
            } else {
                assert.Regexp(t, test.err, err)
            }
        })
    }
}
@@ -1,4 +0,0 @@
// Package terraform contains the utilities that are used for invoking the
// terraform executable under the given directory with the given
// templates.
package terraform
@@ -1,134 +0,0 @@
package terraform

import (
    "bytes"
    "context"
    "fmt"
    "os"
    "path/filepath"
    "text/template"

    "github.com/hashicorp/terraform-exec/tfexec"
    "github.com/pkg/errors"
    "k8s.io/apimachinery/pkg/util/sets"

    "github.com/openshift/installer/data"
    prov "github.com/openshift/installer/pkg/terraform/providers"
)

// unpack unpacks the platform-specific Terraform modules into the
// given directory.
func unpack(dir string, platform string, target string) (err error) {
    err = data.Unpack(dir, filepath.Join(platform, target))
    if err != nil {
        return err
    }

    err = data.Unpack(filepath.Join(dir, "config.tf"), "config.tf")
    if err != nil {
        return err
    }

    platformVarFile := fmt.Sprintf("variables-%s.tf", platform)

    err = data.Unpack(filepath.Join(dir, platformVarFile), filepath.Join(platform, platformVarFile))
    if err != nil {
        return err
    }

    err = data.Unpack(filepath.Join(dir, "terraform.rc"), "terraform.rc")
    if err != nil {
        return err
    }

    return nil
}

// unpackAndInit unpacks the platform-specific Terraform modules into
// the given directory and then runs 'terraform init'.
func unpackAndInit(dir string, platform string, target string, terraformDir string, providers []prov.Provider) (err error) {
    err = unpack(dir, platform, target)
    if err != nil {
        return errors.Wrap(err, "failed to unpack Terraform modules")
    }

    if err := addVersionsFiles(dir, providers); err != nil {
        return errors.Wrap(err, "failed to write versions.tf files")
    }

    tf, err := newTFExec(dir, terraformDir)
    if err != nil {
        return errors.Wrap(err, "failed to create a new tfexec")
    }

    // Explicitly specify the CLI config file to use so that we control the providers that are used.
    os.Setenv("TF_CLI_CONFIG_FILE", filepath.Join(dir, "terraform.rc"))

    return errors.Wrap(
        tf.Init(context.Background(), tfexec.PluginDir(filepath.Join(terraformDir, "plugins"))),
        "failed doing terraform init",
    )
}

const versionFileTemplate = `terraform {
  required_version = ">= 1.0.0"
  required_providers {
{{- range .}}
    {{.Name}} = {
      source = "{{.Source}}"
    }
{{- end}}
  }
}
`
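// Illustrative example (not part of the original file): given
// []prov.Provider{prov.AWS, prov.Ignition}, the template above renders a
// versions.tf like
//
//   terraform {
//     required_version = ">= 1.0.0"
//     required_providers {
//       aws = {
//         source = "openshift/local/aws"
//       }
//       ignition = {
//         source = "openshift/local/ignition"
//       }
//     }
//   }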
func addVersionsFiles(dir string, providers []prov.Provider) error {
    tmpl := template.Must(template.New("versions").Parse(versionFileTemplate))
    buf := &bytes.Buffer{}
    if err := tmpl.Execute(buf, providers); err != nil {
        return errors.Wrap(err, "could not create versions.tf from template")
    }
    return addFileToAllDirectories("versions.tf", buf.Bytes(), dir)
}

func addFileToAllDirectories(name string, data []byte, dir string) error {
    if err := os.WriteFile(filepath.Join(dir, name), data, 0666); err != nil {
        return err
    }
    entries, err := os.ReadDir(dir)
    if err != nil {
        return err
    }
    for _, entry := range entries {
        if entry.IsDir() {
            if err := addFileToAllDirectories(name, data, filepath.Join(dir, entry.Name())); err != nil {
                return err
            }
        }
    }
    return nil
}

// UnpackTerraform unpacks the terraform binary and the specified provider binaries into the specified directory.
func UnpackTerraform(dir string, stages []Stage) error {
    // Unpack the terraform binary.
    if err := prov.UnpackTerraformBinary(filepath.Join(dir, "bin")); err != nil {
        return err
    }

    // Unpack the providers.
    providers := sets.NewString()
    for _, stage := range stages {
        for _, provider := range stage.Providers() {
            if providers.Has(provider.Name) {
                continue
            }
            if err := provider.Extract(filepath.Join(dir, "plugins")); err != nil {
                return err
            }
            providers.Insert(provider.Name)
        }
    }

    return nil
}
@@ -1,21 +0,0 @@
package terraform

import (
    "github.com/sirupsen/logrus"
)

type printfer struct {
    logger *logrus.Logger
    level  logrus.Level
}

func (t *printfer) Printf(format string, v ...interface{}) {
    t.logger.Logf(t.level, format, v...)
}

func newPrintfer() *printfer {
    return &printfer{
        logger: logrus.StandardLogger(),
        level:  logrus.DebugLevel,
    }
}
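// Illustrative note (assumption, not part of the original file): printfer
// exists to satisfy the small Printf-style logger interface that
// terraform-exec's (*tfexec.Terraform).SetLogger expects, so tfexec's own
// log lines are routed into logrus at debug level; see
// tf.SetLogger(newPrintfer()) in newTFExec further down in this diff.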
pkg/terraform/providers/.gitignore vendored
@@ -1,2 +0,0 @@
/mirror/*
!/mirror/README
@@ -1,4 +0,0 @@
Mirror of terraform binary and terraform providers to embed in the installer.
The mirror is populated as part of the build process so that only the binaries for the target architecture are embedded.

(note that this file is needed to appease govet so that the directory is not empty)
@@ -1,125 +0,0 @@
package providers

import (
    "embed"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"

    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

var (
    // AWS is the provider for creating resources in AWS.
    AWS = provider("aws")
    // AzureStack is the provider for creating resources in Azure Stack.
    AzureStack = provider("azurestack")
    // Google is the provider for creating resources in GCP.
    Google = provider("google")
    // IBM is the provider for creating resources in IBM Cloud.
    IBM = provider("ibm")
    // Ignition is the provider for creating ignition config files.
    Ignition = provider("ignition")
    // Libvirt is the provider for provisioning VMs on a libvirt host.
    Libvirt = provider("libvirt")
    // Local is the provider for creating local files.
    Local = provider("local")
    // Nutanix is the provider for creating resources in Nutanix.
    Nutanix = provider("nutanix")
    // OpenStack is the provider for creating resources in OpenStack.
    OpenStack = provider("openstack")
    // OVirt is the provider for creating resources in oVirt.
    OVirt = provider("ovirt")
    // Time is the provider for adding create and sleep requirements for resources.
    Time = provider("time")
)

// Provider is a terraform provider.
type Provider struct {
    // Name of the provider.
    Name string
    // Source of the provider.
    Source string
}

// provider configures a provider built locally.
func provider(name string) Provider {
    return Provider{
        Name:   name,
        Source: fmt.Sprintf("openshift/local/%s", name),
    }
}
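// Illustrative example (not part of the original file): provider("aws")
// yields Provider{Name: "aws", Source: "openshift/local/aws"}; Extract below
// then reads its binaries from mirror/openshift/local/aws in the embedded
// filesystem and unpacks them to <dir>/openshift/local/aws.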
//go:embed mirror/*
var mirror embed.FS

// Extract extracts the provider from the embedded data into the specified directory.
func (p Provider) Extract(dir string) error {
    providerDir := filepath.Join(strings.Split(p.Source, "/")...)
    destProviderDir := filepath.Join(dir, providerDir)
    destDir := destProviderDir
    srcDir := filepath.Join("mirror", providerDir)
    logrus.Debugf("creating %s directory", destDir)
    if err := os.MkdirAll(destDir, 0777); err != nil {
        return errors.Wrapf(err, "could not make directory for the %s provider", p.Name)
    }
    if err := unpack(srcDir, destDir); err != nil {
        return errors.Wrapf(err, "could not unpack the directory for the %s provider", p.Name)
    }
    return nil
}

func unpack(srcDir, destDir string) error {
    entries, err := mirror.ReadDir(srcDir)
    if err != nil {
        return err
    }
    for _, entry := range entries {
        if entry.IsDir() {
            childSrcDir := filepath.Join(srcDir, entry.Name())
            childDestDir := filepath.Join(destDir, entry.Name())
            logrus.Debugf("creating %s directory", childDestDir)
            if err := os.Mkdir(childDestDir, 0777); err != nil {
                return err
            }
            if err := unpack(childSrcDir, childDestDir); err != nil {
                return err
            }
            continue
        }
        logrus.Debugf("creating %s file", filepath.Join(destDir, entry.Name()))
        if err := unpackFile(filepath.Join(srcDir, entry.Name()), filepath.Join(destDir, entry.Name())); err != nil {
            return err
        }
    }
    return nil
}

func unpackFile(srcPath, destPath string) error {
    srcFile, err := mirror.Open(srcPath)
    if err != nil {
        return err
    }
    defer srcFile.Close()
    destFile, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
    if err != nil {
        return err
    }
    defer destFile.Close()
    if _, err := io.Copy(destFile, srcFile); err != nil {
        return err
    }
    return nil
}

// UnpackTerraformBinary unpacks the terraform binary from the embedded data so that it can be run to create the
// infrastructure for the cluster.
func UnpackTerraformBinary(dir string) error {
    if err := os.MkdirAll(dir, 0777); err != nil {
        return err
    }
    return unpack("mirror/terraform", dir)
}
@@ -1,38 +0,0 @@
package terraform

import (
    "github.com/openshift/installer/pkg/asset"
    "github.com/openshift/installer/pkg/terraform/providers"
    "github.com/openshift/installer/pkg/types"
)

// Stage is an individual stage of terraform infrastructure provisioning.
type Stage interface {
    // Name is the name of the stage.
    Name() string

    // Platform is the name of the platform.
    Platform() string

    // StateFilename is the name of the terraform state file.
    StateFilename() string

    // OutputsFilename is the name of the outputs file for the stage.
    OutputsFilename() string

    // Providers is the list of providers that are used for the stage.
    Providers() []providers.Provider

    // DestroyWithBootstrap is true if the stage should be destroyed when destroying the bootstrap resources.
    DestroyWithBootstrap() bool

    // Destroy destroys the resources created in the stage. This should only be called if the stage should be destroyed
    // when destroying the bootstrap resources.
    Destroy(directory string, terraformDir string, varFiles []string) error

    // ExtractHostAddresses extracts the IPs of the bootstrap and control plane machines.
    ExtractHostAddresses(directory string, config *types.InstallConfig) (bootstrap string, port int, masters []string, err error)

    // ExtractLBConfig extracts the LB DNS Names of the internal and external API LBs.
    ExtractLBConfig(directory string, terraformDir string, file *asset.File, tfvarsFile *asset.File) (ignition string, err error)
}
@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- azure-approvers
reviewers:
- azure-reviewers
@@ -1,28 +0,0 @@
package azure

import (
    "github.com/openshift/installer/pkg/terraform"
    "github.com/openshift/installer/pkg/terraform/providers"
    "github.com/openshift/installer/pkg/terraform/stages"
    typesazure "github.com/openshift/installer/pkg/types/azure"
)

// StackPlatformStages are the stages to run to provision the infrastructure in Azure Stack.
var StackPlatformStages = []terraform.Stage{
    stages.NewStage(
        typesazure.StackTerraformName,
        "vnet",
        []providers.Provider{providers.AzureStack},
    ),
    stages.NewStage(
        typesazure.StackTerraformName,
        "bootstrap",
        []providers.Provider{providers.AzureStack, providers.Ignition, providers.Local},
        stages.WithNormalBootstrapDestroy(),
    ),
    stages.NewStage(
        typesazure.StackTerraformName,
        "cluster",
        []providers.Provider{providers.AzureStack},
    ),
}
@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- ibmcloud-approvers
reviewers:
- ibmcloud-reviewers
@@ -1,59 +0,0 @@
package ibmcloud

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/hashicorp/terraform-exec/tfexec"
    "github.com/sirupsen/logrus"

    "github.com/openshift/installer/pkg/terraform"
    "github.com/openshift/installer/pkg/terraform/providers"
    "github.com/openshift/installer/pkg/terraform/stages"
    ibmcloudtfvars "github.com/openshift/installer/pkg/tfvars/ibmcloud"
    ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud"
)

// PlatformStages are the stages to run to provision the infrastructure in IBM Cloud.
var PlatformStages = []terraform.Stage{
    stages.NewStage(
        "ibmcloud",
        "network",
        []providers.Provider{providers.IBM},
    ),
    stages.NewStage(
        "ibmcloud",
        "bootstrap",
        []providers.Provider{providers.IBM},
        stages.WithCustomBootstrapDestroy(customBootstrapDestroy),
    ),
    stages.NewStage(
        "ibmcloud",
        "master",
        []providers.Provider{providers.IBM},
    ),
}

func customBootstrapDestroy(s stages.SplitStage, directory string, terraformDir string, varFiles []string) error {
    opts := make([]tfexec.DestroyOption, 0, len(varFiles)+1)
    for _, varFile := range varFiles {
        opts = append(opts, tfexec.VarFile(varFile))
    }

    // If there is an endpoint override JSON file in the terraformDir's parent directory (terraformDir isn't available during JSON file creation),
    // we want to inject that file into the Terraform variables so IBM Cloud Service endpoints are overridden.
    terraformParentDir := filepath.Dir(terraformDir)
    endpointOverrideFile := filepath.Join(terraformParentDir, ibmcloudtfvars.IBMCloudEndpointJSONFileName)
    if _, err := os.Stat(endpointOverrideFile); err == nil {
        // Set variable to use private endpoints (overrides) from JSON file, via the IBM Cloud Terraform variable: 'ibmcloud_endpoints_json_file'.
        opts = append(opts, tfexec.Var(fmt.Sprintf("ibmcloud_endpoints_json_file=%s", endpointOverrideFile)))
        logrus.Debugf("configuring terraform bootstrap destroy with ibm endpoint overrides: %s", endpointOverrideFile)
    }
    err := terraform.Destroy(directory, ibmcloudtypes.Name, s, terraformDir, opts...)
    if err != nil {
        return fmt.Errorf("failed to destroy bootstrap: %w", err)
    }

    return nil
}
@@ -1,83 +0,0 @@
package stages

import (
    "encoding/base64"
    "fmt"
    "strings"

    igntypes "github.com/coreos/ignition/v2/config/v3_2/types"
    "sigs.k8s.io/yaml"

    configv1 "github.com/openshift/api/config/v1"
    "github.com/openshift/installer/pkg/types/gcp"
)

const (
    // replaceable is the string that precedes the encoded data in the ignition data.
    // The data must be replaced before decoding the string, and the string must be
    // prepended to the encoded data.
    replaceable = "data:text/plain;charset=utf-8;base64,"
)
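// Illustrative note (assumption, not part of the original file): an ignition
// file entry's Contents.Source is a data URL of the form
//
//   data:text/plain;charset=utf-8;base64,<base64-encoded manifest>
//
// so AddLoadBalancersToInfra below strips the prefix, base64-decodes the
// manifest, patches the Infrastructure CR, and re-encodes the result with the
// prefix restored.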
// AddLoadBalancersToInfra will load the public and private load balancer information into
// the infrastructure CR. This will occur after the data has already been inserted into the
// ignition file.
func AddLoadBalancersToInfra(platform string, config *igntypes.Config, publicLBs []string, privateLBs []string) error {
    index := -1
    for i, fileData := range config.Storage.Files {
        // update the contents of this file
        if fileData.Path == "/opt/openshift/manifests/cluster-infrastructure-02-config.yml" {
            index = i
            break
        }
    }

    if index >= 0 {
        contents := config.Storage.Files[index].Contents.Source
        replaced := strings.Replace(*contents, replaceable, "", 1)

        rawDecodedText, err := base64.StdEncoding.DecodeString(replaced)
        if err != nil {
            return err
        }

        infra := &configv1.Infrastructure{}
        if err := yaml.Unmarshal(rawDecodedText, infra); err != nil {
            return err
        }

        // convert the list of strings to a list of IPs
        apiIntLbs := []configv1.IP{}
        for _, ip := range privateLBs {
            apiIntLbs = append(apiIntLbs, configv1.IP(ip))
        }
        apiLbs := []configv1.IP{}
        for _, ip := range publicLBs {
            apiLbs = append(apiLbs, configv1.IP(ip))
        }
        cloudLBInfo := configv1.CloudLoadBalancerIPs{
            APIIntLoadBalancerIPs: apiIntLbs,
            APILoadBalancerIPs:    apiLbs,
        }

        switch platform {
        case gcp.Name:
            if infra.Status.PlatformStatus.GCP.CloudLoadBalancerConfig.DNSType == configv1.ClusterHostedDNSType {
                infra.Status.PlatformStatus.GCP.CloudLoadBalancerConfig.ClusterHosted = &cloudLBInfo
            }
        default:
            return fmt.Errorf("failed to set load balancer info for platform %s", platform)
        }

        // convert the infrastructure back to an encoded string
        infraContents, err := yaml.Marshal(infra)
        if err != nil {
            return err
        }

        encoded := fmt.Sprintf("%s%s", replaceable, base64.StdEncoding.EncodeToString(infraContents))
        config.Storage.Files[index].Contents.Source = &encoded
    }

    return nil
}
@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- libvirt-approvers
reviewers:
- libvirt-reviewers
@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- ovirt-approvers
reviewers:
- ovirt-reviewers
@@ -1,162 +0,0 @@
package ovirt

import (
    "fmt"
    "net"
    "strconv"
    "time"

    ovirtsdk4 "github.com/ovirt/go-ovirt"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"

    "github.com/openshift/installer/pkg/asset/installconfig/ovirt"
    "github.com/openshift/installer/pkg/terraform"
    "github.com/openshift/installer/pkg/terraform/providers"
    "github.com/openshift/installer/pkg/terraform/stages"
    "github.com/openshift/installer/pkg/types"
    ovirttypes "github.com/openshift/installer/pkg/types/ovirt"
)

const bootstrapSSHPort = 22

var bootstrapSSHPortAsString = strconv.Itoa(bootstrapSSHPort)

// PlatformStages are the stages to run to provision the infrastructure in oVirt.
var PlatformStages = []terraform.Stage{
    stages.NewStage(
        ovirttypes.Name,
        "image",
        []providers.Provider{providers.OVirt},
        stages.WithNormalBootstrapDestroy(),
    ),
    stages.NewStage(
        ovirttypes.Name,
        "cluster",
        []providers.Provider{providers.OVirt},
        stages.WithCustomExtractHostAddresses(extractOutputHostAddresses),
    ),
    stages.NewStage(
        ovirttypes.Name,
        "bootstrap",
        []providers.Provider{providers.OVirt},
        stages.WithNormalBootstrapDestroy(),
        stages.WithCustomExtractHostAddresses(extractOutputHostAddresses),
    ),
}

func extractOutputHostAddresses(s stages.SplitStage, directory string, ic *types.InstallConfig) (bootstrapIP string, sshPort int, controlPlaneIPs []string, returnErr error) {
    sshPort = bootstrapSSHPort

    outputs, err := stages.GetTerraformOutputs(s, directory)
    if err != nil {
        returnErr = err
        return
    }

    client, err := ovirt.NewConnection()
    if err != nil {
        returnErr = errors.Wrap(err, "failed to initialize connection to ovirt-engine")
        return
    }
    defer client.Close()

    if vmIDRaw, ok := outputs["bootstrap_vm_id"]; ok {
        vmID, ok := vmIDRaw.(string)
        if !ok {
            returnErr = errors.New("could not read bootstrap VM ID from terraform outputs")
            return
        }
        ip, err := findVirtualMachineIP(vmID, client)
        if err != nil {
            returnErr = errors.Wrapf(err, "could not find IP address for bootstrap instance %q", vmID)
            return
        }
        bootstrapIP = ip
    }

    if vmIDsRaw, ok := outputs["control_plane_vm_ids"]; ok {
        vmIDs, ok := vmIDsRaw.([]interface{})
        if !ok {
            returnErr = errors.New("could not read control plane VM IDs from terraform outputs")
            return
        }
        controlPlaneIPs = make([]string, len(vmIDs))
        for i, vmIDRaw := range vmIDs {
            vmID, ok := vmIDRaw.(string)
            if !ok {
                returnErr = errors.New("could not read control plane VM ID from terraform outputs")
                return
            }
            ip, err := findVirtualMachineIP(vmID, client)
            if err != nil {
                returnErr = errors.Wrapf(err, "could not find IP address for control plane instance %q", vmID)
                return
            }
            controlPlaneIPs[i] = ip
        }
    }

    return
}

func checkPortIsOpen(host string, port string) bool {
    timeout := time.Second
    conn, err := net.DialTimeout("tcp", net.JoinHostPort(host, port), timeout)
    if err != nil {
        logrus.Debugf("connection error: %v", err)
        return false
    }
    if conn != nil {
        defer conn.Close()
    }
    return conn != nil
}

func getReportedDevices(c *ovirtsdk4.Connection, vmID string) (*ovirtsdk4.ReportedDeviceSlice, error) {
    vmsService := c.SystemService().VmsService()
    // Look up the vm by id:
    vmResp, err := vmsService.VmService(vmID).Get().Send()
    if err != nil {
        return nil, fmt.Errorf("failed to find VM by id %v, reason: %v", vmID, err)
    }
    vm := vmResp.MustVm()

    // Get the reported-devices service for this vm:
    reportedDevicesService := vmsService.VmService(vm.MustId()).ReportedDevicesService()

    // Get the guest reported devices
    reportedDeviceResp, err := reportedDevicesService.List().Send()
    if err != nil {
        return nil, fmt.Errorf("failed to get reported devices list, reason: %v", err)
    }
    reportedDeviceSlice, hasIps := reportedDeviceResp.ReportedDevice()

    if !hasIps {
        return nil, fmt.Errorf("cannot find IPs for vmId: %s", vmID)
    }
    return reportedDeviceSlice, nil
}

func findVirtualMachineIP(instanceID string, client *ovirtsdk4.Connection) (string, error) {
    reportedDeviceSlice, err := getReportedDevices(client, instanceID)
    if err != nil {
        return "", errors.Wrapf(err, "could not find IP address for vm id: %s", instanceID)
    }

    for _, reportedDevice := range reportedDeviceSlice.Slice() {
        ips, hasIps := reportedDevice.Ips()
        if hasIps {
            for _, ip := range ips.Slice() {
                ipres, hasAddress := ip.Address()
                if hasAddress {
                    if checkPortIsOpen(ipres, bootstrapSSHPortAsString) {
                        logrus.Debugf("ovirt vm id: %s, found usable IP address: %s", instanceID, ipres)
                        return ipres, nil
                    }
                }
            }
        }
    }
    return "", fmt.Errorf("could not find usable IP address for vm id: %s", instanceID)
}
@@ -1,7 +0,0 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- powervs-approvers
reviewers:
- powervs-reviewers
@@ -1,43 +0,0 @@
package powervs

import (
    "fmt"

    "github.com/hashicorp/terraform-exec/tfexec"

    "github.com/openshift/installer/pkg/terraform"
    "github.com/openshift/installer/pkg/terraform/providers"
    "github.com/openshift/installer/pkg/terraform/stages"
    powervstypes "github.com/openshift/installer/pkg/types/powervs"
)

// PlatformStages are the stages to run to provision the infrastructure in PowerVS.
var PlatformStages = []terraform.Stage{
    stages.NewStage("powervs",
        "cluster",
        []providers.Provider{providers.IBM, providers.Ignition, providers.Time}),
    stages.NewStage("powervs",
        "bootstrap",
        []providers.Provider{providers.IBM, providers.Ignition, providers.Time},
        stages.WithNormalBootstrapDestroy()),
    stages.NewStage("powervs",
        "bootstrap-routing",
        []providers.Provider{providers.IBM},
        stages.WithCustomBootstrapDestroy(removeFromLoadBalancers)),
}

func removeFromLoadBalancers(s stages.SplitStage, directory string, terraformDir string, varFiles []string) error {
    opts := make([]tfexec.ApplyOption, 0, len(varFiles)+1)
    for _, varFile := range varFiles {
        opts = append(opts, tfexec.VarFile(varFile))
    }
    opts = append(opts, tfexec.Var("powervs_expose_bootstrap=false"))
    err := terraform.Apply(directory, powervstypes.Name, s, terraformDir, opts...)
    if err == nil {
        return nil
    }
    return fmt.Errorf(
        "failed disabling bootstrap load balancing: %w",
        err,
    )
}
@@ -1,203 +0,0 @@
package stages

import (
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"

    "github.com/hashicorp/terraform-exec/tfexec"
    "github.com/pkg/errors"

    "github.com/openshift/installer/pkg/asset"
    "github.com/openshift/installer/pkg/terraform"
    "github.com/openshift/installer/pkg/terraform/providers"
    "github.com/openshift/installer/pkg/types"
)

// StageOption is an option for configuring a split stage.
type StageOption func(*SplitStage)

// NewStage creates a new split stage.
// The default behavior is the following. The behavior can be changed by providing StageOptions.
//   - The resources of the stage will not be deleted as part of destroying the bootstrap.
//   - The IP addresses for the bootstrap and control plane VMs will be output from the stage as bootstrap_ip and
//     control_plane_ips, respectively. Only one stage for the platform should output a particular variable. This will
//     likely be the same stage that creates the VM.
func NewStage(platform, name string, providers []providers.Provider, opts ...StageOption) SplitStage {
    s := SplitStage{
        platform:  platform,
        name:      name,
        providers: providers,
    }
    for _, opt := range opts {
        opt(&s)
    }
    return s
}

// WithNormalBootstrapDestroy returns an option for specifying that a split stage should use the normal bootstrap
// destroy process. The normal process is to fully delete all of the resources created in the stage.
func WithNormalBootstrapDestroy() StageOption {
    return WithCustomBootstrapDestroy(normalDestroy)
}

// WithCustomBootstrapDestroy returns an option for specifying that a split stage should use a custom bootstrap
// destroy process.
func WithCustomBootstrapDestroy(destroy DestroyFunc) StageOption {
    return func(s *SplitStage) {
        s.destroyWithBootstrap = true
        s.destroy = destroy
    }
}

// WithCustomExtractHostAddresses returns an option for specifying that a split stage should use a custom extract host addresses process.
func WithCustomExtractHostAddresses(extractHostAddresses ExtractFunc) StageOption {
    return func(s *SplitStage) {
        s.extractHostAddresses = extractHostAddresses
    }
}

// WithCustomExtractLBConfig returns an option for specifying that a split stage
// should use a custom method to extract load balancer DNS names.
func WithCustomExtractLBConfig(extractLBConfig ExtractLBConfigFunc) StageOption {
    return func(s *SplitStage) {
        s.extractLBConfig = extractLBConfig
    }
}

// SplitStage is a split stage.
type SplitStage struct {
    platform             string
    name                 string
    providers            []providers.Provider
    destroyWithBootstrap bool
    destroy              DestroyFunc
    extractHostAddresses ExtractFunc
    extractLBConfig      ExtractLBConfigFunc
}

// DestroyFunc is a function for destroying the stage.
type DestroyFunc func(s SplitStage, directory string, terraformDir string, varFiles []string) error

// ExtractFunc is a function for extracting host addresses.
type ExtractFunc func(s SplitStage, directory string, ic *types.InstallConfig) (string, int, []string, error)

// ExtractLBConfigFunc is a function for extracting LB DNS Names.
type ExtractLBConfigFunc func(s SplitStage, directory string, terraformDir string, file *asset.File, tfvarsFile *asset.File) (string, error)

// Name implements pkg/terraform/Stage.Name
func (s SplitStage) Name() string {
    return s.name
}

// Providers is the list of providers that are used for the stage.
func (s SplitStage) Providers() []providers.Provider {
    return s.providers
}

// StateFilename implements pkg/terraform/Stage.StateFilename
func (s SplitStage) StateFilename() string {
    return fmt.Sprintf("terraform.%s.tfstate", s.name)
}

// OutputsFilename implements pkg/terraform/Stage.OutputsFilename
func (s SplitStage) OutputsFilename() string {
    return fmt.Sprintf("%s.tfvars.json", s.name)
}

// DestroyWithBootstrap implements pkg/terraform/Stage.DestroyWithBootstrap
func (s SplitStage) DestroyWithBootstrap() bool {
    return s.destroyWithBootstrap
}

// Destroy implements pkg/terraform/Stage.Destroy
func (s SplitStage) Destroy(directory string, terraformDir string, varFiles []string) error {
    return s.destroy(s, directory, terraformDir, varFiles)
}

// Platform implements pkg/terraform/Stage.Platform.
func (s SplitStage) Platform() string {
    return s.platform
}

// ExtractHostAddresses implements pkg/terraform/Stage.ExtractHostAddresses
func (s SplitStage) ExtractHostAddresses(directory string, ic *types.InstallConfig) (string, int, []string, error) {
    if s.extractHostAddresses != nil {
        return s.extractHostAddresses(s, directory, ic)
    }
    return normalExtractHostAddresses(s, directory, ic)
}

// ExtractLBConfig implements pkg/terraform/Stage.ExtractLBConfig.
func (s SplitStage) ExtractLBConfig(directory string, terraformDir string, file *asset.File, tfvarsFile *asset.File) (string, error) {
    if s.extractLBConfig != nil {
        return s.extractLBConfig(s, directory, terraformDir, file, tfvarsFile)
    }
    return normalExtractLBConfig(s, directory, terraformDir, file, tfvarsFile)
}

// GetTerraformOutputs reads the terraform outputs file for the stage and parses it into a map of outputs.
func GetTerraformOutputs(s SplitStage, directory string) (map[string]interface{}, error) {
    outputsFilePath := filepath.Join(directory, s.OutputsFilename())
    if _, err := os.Stat(outputsFilePath); err != nil {
        return nil, errors.Wrapf(err, "could not find outputs file %q", outputsFilePath)
    }

    outputsFile, err := os.ReadFile(outputsFilePath)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to read outputs file %q", outputsFilePath)
    }

    outputs := map[string]interface{}{}
    if err := json.Unmarshal(outputsFile, &outputs); err != nil {
        return nil, errors.Wrapf(err, "could not unmarshal outputs file %q", outputsFilePath)
    }

    return outputs, nil
}
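// Illustrative example (hypothetical values, not part of the original file):
// for a stage named "cluster", GetTerraformOutputs reads cluster.tfvars.json,
// whose contents might look like
//
//   {"bootstrap_ip": "10.0.0.5", "control_plane_ips": ["10.0.0.6", "10.0.0.7", "10.0.0.8"]}
//
// which normalExtractHostAddresses below then picks apart.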
func normalExtractHostAddresses(s SplitStage, directory string, _ *types.InstallConfig) (string, int, []string, error) {
    outputs, err := GetTerraformOutputs(s, directory)
    if err != nil {
        return "", 0, nil, err
    }

    var bootstrap string
    if bootstrapRaw, ok := outputs["bootstrap_ip"]; ok {
        bootstrap, ok = bootstrapRaw.(string)
        if !ok {
            return "", 0, nil, errors.New("could not read bootstrap IP from terraform outputs")
        }
    }

    var masters []string
    if mastersRaw, ok := outputs["control_plane_ips"]; ok {
        mastersSlice, ok := mastersRaw.([]interface{})
        if !ok {
            return "", 0, nil, errors.New("could not read control plane IPs from terraform outputs")
        }
        masters = make([]string, len(mastersSlice))
        for i, ipRaw := range mastersSlice {
            ip, ok := ipRaw.(string)
            if !ok {
                return "", 0, nil, errors.New("could not read control plane IPs from terraform outputs")
            }
            masters[i] = ip
        }
    }

    return bootstrap, 0, masters, nil
}

func normalDestroy(s SplitStage, directory string, terraformDir string, varFiles []string) error {
    opts := make([]tfexec.DestroyOption, len(varFiles))
    for i, varFile := range varFiles {
        opts[i] = tfexec.VarFile(varFile)
    }
    return errors.Wrap(terraform.Destroy(directory, s.platform, s, terraformDir, opts...), "terraform destroy")
}

func normalExtractLBConfig(s SplitStage, directory string, terraformDir string, file *asset.File, tfvarsFile *asset.File) (string, error) {
    return "", nil
}
@@ -1,32 +0,0 @@
package terraform

import (
    "context"
    "encoding/json"

    "github.com/pkg/errors"
)

// StateFilename is the default name of the terraform state file.
const StateFilename = "terraform.tfstate"

// Outputs reads the terraform state file and returns the outputs of the stage as json.
func Outputs(dir string, terraformDir string) ([]byte, error) {
    tf, err := newTFExec(dir, terraformDir)
    if err != nil {
        return nil, err
    }

    tfoutput, err := tf.Output(context.Background())
    if err != nil {
        return nil, errors.Wrap(err, "failed to read terraform state file")
    }

    outputs := make(map[string]interface{}, len(tfoutput))
    for key, value := range tfoutput {
        outputs[key] = value.Value
    }

    data, err := json.Marshal(outputs)
    return data, errors.Wrap(err, "could not marshal outputs")
}
@@ -1,317 +0,0 @@
package terraform

import (
    "context"
    "fmt"
    "os"
    "path"
    "path/filepath"

    "github.com/hashicorp/terraform-exec/tfexec"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"

    "github.com/openshift/installer/pkg/asset"
    "github.com/openshift/installer/pkg/asset/cluster/tfvars"
    "github.com/openshift/installer/pkg/infrastructure"
    "github.com/openshift/installer/pkg/lineprinter"
    "github.com/openshift/installer/pkg/metrics/timer"
    "github.com/openshift/installer/pkg/types"
)

const (
    tfVarsFileName         = "terraform.tfvars.json"
    tfPlatformVarsFileName = "terraform.platform.auto.tfvars.json"
)

// Provider implements the infrastructure.Provider interface.
type Provider struct {
    stages []Stage
}

// InitializeProvider creates a concrete infrastructure.Provider for the given platform.
func InitializeProvider(stages []Stage) infrastructure.Provider {
    return &Provider{stages}
}

// Provision implements pkg/infrastructure/provider.Provision. Provision iterates
// through each of the stages and applies the Terraform config for the stage.
func (p *Provider) Provision(_ context.Context, dir string, parents asset.Parents) ([]*asset.File, error) {
    tfVars := &tfvars.TerraformVariables{}
    parents.Get(tfVars)
    vars := tfVars.Files()

    fileList := []*asset.File{}
    terraformDir := filepath.Join(dir, "terraform")
    if err := os.Mkdir(terraformDir, 0777); err != nil {
        return nil, fmt.Errorf("could not create the terraform directory: %w", err)
    }

    terraformDirPath, err := filepath.Abs(terraformDir)
    if err != nil {
        return nil, fmt.Errorf("cannot get absolute path of terraform directory: %w", err)
    }

    defer os.RemoveAll(terraformDir)
    if err = UnpackTerraform(terraformDirPath, p.stages); err != nil {
        return nil, fmt.Errorf("error unpacking terraform: %w", err)
    }

    for _, stage := range p.stages {
        outputs, stateFile, err := applyStage(stage.Platform(), stage, terraformDirPath, vars)
        if err != nil {
            // Write the state file to the install directory even if the apply failed.
            if stateFile != nil {
                fileList = append(fileList, stateFile)
            }
            return fileList, fmt.Errorf("failure applying terraform for %q stage: %w", stage.Name(), err)
        }
        vars = append(vars, outputs)
        fileList = append(fileList, outputs)
        fileList = append(fileList, stateFile)

        _, extErr := stage.ExtractLBConfig(dir, terraformDirPath, outputs, vars[0])
        if extErr != nil {
            return fileList, fmt.Errorf("failed to extract load balancer information: %w", extErr)
        }
    }
    return fileList, nil
}
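// Illustrative note (not part of the original file): because Provision
// appends each stage's outputs file to vars, later stages can consume
// earlier stages' terraform outputs as input variables, e.g. a "bootstrap"
// stage reading resource IDs created by a "vnet" or "cluster" stage.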
// DestroyBootstrap implements pkg/infrastructure/provider.DestroyBootstrap.
// DestroyBootstrap iterates through each stage, and will run the destroy
// command when defined on a stage.
func (p *Provider) DestroyBootstrap(ctx context.Context, dir string) error {
    varFiles := []string{tfVarsFileName, tfPlatformVarsFileName}
    for _, stage := range p.stages {
        varFiles = append(varFiles, stage.OutputsFilename())
    }

    terraformDir := filepath.Join(dir, "terraform")
    if err := os.Mkdir(terraformDir, 0777); err != nil {
        return fmt.Errorf("could not create the terraform directory: %w", err)
    }

    terraformDirPath, err := filepath.Abs(terraformDir)
    if err != nil {
        return fmt.Errorf("could not get absolute path of terraform directory: %w", err)
    }

    defer os.RemoveAll(terraformDirPath)
    if err = UnpackTerraform(terraformDirPath, p.stages); err != nil {
        return fmt.Errorf("error unpacking terraform: %w", err)
    }

    for i := len(p.stages) - 1; i >= 0; i-- {
        stage := p.stages[i]

        if !stage.DestroyWithBootstrap() {
            continue
        }

        tempDir, err := os.MkdirTemp("", fmt.Sprintf("openshift-install-%s-", stage.Name()))
        if err != nil {
            return fmt.Errorf("failed to create temporary directory for Terraform execution: %w", err)
        }
        defer os.RemoveAll(tempDir)

        stateFilePathInInstallDir := filepath.Join(dir, stage.StateFilename())
        stateFilePathInTempDir := filepath.Join(tempDir, StateFilename)
        if err := copyFile(stateFilePathInInstallDir, stateFilePathInTempDir); err != nil {
            return fmt.Errorf("failed to copy state file to the temporary directory: %w", err)
        }

        targetVarFiles := make([]string, 0, len(varFiles))
        for _, filename := range varFiles {
            sourcePath := filepath.Join(dir, filename)
            targetPath := filepath.Join(tempDir, filename)
            if err := copyFile(sourcePath, targetPath); err != nil {
                // platform may not need platform-specific Terraform variables
                if filename == tfPlatformVarsFileName {
                    var pErr *os.PathError
                    if errors.As(err, &pErr) && pErr.Path == sourcePath {
                        continue
                    }
                }
                return fmt.Errorf("failed to copy %s to the temporary directory: %w", filename, err)
            }
            targetVarFiles = append(targetVarFiles, targetPath)
        }

        if err := stage.Destroy(tempDir, terraformDirPath, targetVarFiles); err != nil {
            return err
        }

        if err := copyFile(stateFilePathInTempDir, stateFilePathInInstallDir); err != nil {
            return fmt.Errorf("failed to copy state file from the temporary directory: %w", err)
        }
    }
    return nil
}

// ExtractHostAddresses implements pkg/infrastructure/provider.ExtractHostAddresses. Extracts the addresses to be used
// for gathering debug logs by inspecting the Terraform output files.
func (p *Provider) ExtractHostAddresses(dir string, config *types.InstallConfig, ha *infrastructure.HostAddresses) error {
    for _, stage := range p.stages {
        stageBootstrap, stagePort, stageMasters, err := stage.ExtractHostAddresses(dir, config)
        if err != nil {
            logrus.Warnf("Failed to extract host addresses: %s", err.Error())
        } else {
            if stageBootstrap != "" {
                ha.Bootstrap = stageBootstrap
            }
            if stagePort != 0 {
                ha.Port = stagePort
            }
            if len(stageMasters) > 0 {
                ha.Masters = stageMasters
            }
        }
    }
    return nil
}

// newTFExec creates a tfexec.Terraform for executing Terraform CLI commands.
// The `datadir` is the location to which the terraform plan (tf files, etc.) has been unpacked.
// The `terraformDir` is the location to which Terraform, provider binaries, & .terraform data dir have been unpacked.
// The stdout and stderr will be sent to the logger at the debug and error levels,
// respectively.
func newTFExec(datadir string, terraformDir string) (*tfexec.Terraform, error) {
    tfPath := filepath.Join(terraformDir, "bin", "terraform")
    tf, err := tfexec.NewTerraform(datadir, tfPath)
    if err != nil {
        return nil, err
    }

    // terraform-exec will not accept debug logs unless a log file path has
    // been specified, which makes sense since the logging is very verbose.
    if path, ok := os.LookupEnv("TF_LOG_PATH"); ok {
        // These may fail if the tf CLI is not a compatible version. Since the
        // exact same check is repeated, we only have to verify the error once
        // for all calls.
        if err := tf.SetLog(os.Getenv("TF_LOG")); err != nil {
            // We want to skip setting the log path since the tf-exec lib will
            // default to TRACE log levels, which can risk leaking sensitive
            // data.
            logrus.Infof("Skipping setting terraform log levels: %v", err)
        } else {
            tf.SetLogCore(os.Getenv("TF_LOG_CORE"))         //nolint:errcheck
            tf.SetLogProvider(os.Getenv("TF_LOG_PROVIDER")) //nolint:errcheck
            // This never returns any errors despite its signature
            tf.SetLogPath(path) //nolint:errcheck
        }
    }

    // Add terraform info logs to the installer log
    lpDebug := &lineprinter.LinePrinter{Print: (&lineprinter.Trimmer{WrappedPrint: logrus.Debug}).Print}
    lpError := &lineprinter.LinePrinter{Print: (&lineprinter.Trimmer{WrappedPrint: logrus.Error}).Print}
    defer lpDebug.Close()
    defer lpError.Close()

    tf.SetStdout(lpDebug)
    tf.SetStderr(lpError)
    tf.SetLogger(newPrintfer())

    // Set the Terraform data dir to be the same as the terraformDir so that
    // files we unpack are contained and, more importantly, we can ensure the
    // provider binaries unpacked in the Terraform data dir have the same permission
    // levels as the Terraform binary.
    dd := path.Join(terraformDir, ".terraform")
    os.Setenv("TF_DATA_DIR", dd)

    return tf, nil
}

// Apply unpacks the platform-specific Terraform modules into the
// given directory and then runs 'terraform init' and 'terraform
// apply'.
func Apply(dir string, platform string, stage Stage, terraformDir string, extraOpts ...tfexec.ApplyOption) error {
    if err := unpackAndInit(dir, platform, stage.Name(), terraformDir, stage.Providers()); err != nil {
        return err
    }

    tf, err := newTFExec(dir, terraformDir)
    if err != nil {
        return errors.Wrap(err, "failed to create a new tfexec")
    }
    err = tf.Apply(context.Background(), extraOpts...)
    return errors.Wrap(diagnoseApplyError(err), "failed to apply Terraform")
}

// Destroy unpacks the platform-specific Terraform modules into the
// given directory and then runs 'terraform init' and 'terraform
// destroy'.
func Destroy(dir string, platform string, stage Stage, terraformDir string, extraOpts ...tfexec.DestroyOption) error {
    if err := unpackAndInit(dir, platform, stage.Name(), terraformDir, stage.Providers()); err != nil {
        return err
    }

    tf, err := newTFExec(dir, terraformDir)
    if err != nil {
        return errors.Wrap(err, "failed to create a new tfexec")
    }
    return errors.Wrap(
        tf.Destroy(context.Background(), extraOpts...),
        "failed doing terraform destroy",
    )
}

func applyStage(platform string, stage Stage, terraformDir string, tfvarsFiles []*asset.File) (*asset.File, *asset.File, error) {
    // Copy the terraform.tfvars to a temp directory which will contain the terraform plan.
    tmpDir, err := os.MkdirTemp("", fmt.Sprintf("openshift-install-%s-", stage.Name()))
    if err != nil {
        return nil, nil, errors.Wrap(err, "failed to create temp dir for terraform execution")
    }
    defer os.RemoveAll(tmpDir)

    extraOpts := []tfexec.ApplyOption{}
    for _, file := range tfvarsFiles {
        if err := os.WriteFile(filepath.Join(tmpDir, file.Filename), file.Data, 0o600); err != nil {
            return nil, nil, err
        }
        extraOpts = append(extraOpts, tfexec.VarFile(filepath.Join(tmpDir, file.Filename)))
    }

    return applyTerraform(tmpDir, platform, stage, terraformDir, extraOpts...)
}

func applyTerraform(tmpDir string, platform string, stage Stage, terraformDir string, opts ...tfexec.ApplyOption) (outputsFile, stateFile *asset.File, err error) {
    timer.StartTimer(stage.Name())
    defer timer.StopTimer(stage.Name())

    applyErr := Apply(tmpDir, platform, stage, terraformDir, opts...)

    if data, err := os.ReadFile(filepath.Join(tmpDir, StateFilename)); err == nil {
        stateFile = &asset.File{
            Filename: stage.StateFilename(),
            Data:     data,
        }
    } else if !os.IsNotExist(err) {
        logrus.Errorf("Failed to read tfstate: %v", err)
        return nil, nil, errors.Wrap(err, "failed to read tfstate")
    }

    if applyErr != nil {
        return nil, stateFile, fmt.Errorf("error applying Terraform configs: %w", applyErr)
    }

    outputs, err := Outputs(tmpDir, terraformDir)
    if err != nil {
        return nil, stateFile, errors.Wrapf(err, "could not get outputs from stage %q", stage.Name())
    }

    outputsFile = &asset.File{
        Filename: stage.OutputsFilename(),
        Data:     outputs,
    }
    return outputsFile, stateFile, nil
}

func copyFile(from string, to string) error {
    data, err := os.ReadFile(from)
    if err != nil {
        return err
    }

    return os.WriteFile(to, data, 0o666) //nolint:gosec // state file doesn't need to be 0600
}