Mirror of https://github.com/openshift/installer.git
fixup! data/cluster-api: add cluster-api components
Signed-off-by: Vince Prignano <vincepri@redhat.com>
@@ -198,7 +198,7 @@ func clusterCreatePostRun(ctx context.Context) (int, error) {
            "Warning: this should only be used for debugging purposes, and poses a risk to cluster stability.")
    } else {
        logrus.Info("Destroying the bootstrap resources...")
-       err = destroybootstrap.Destroy(command.RootOpts.Dir)
+       err = destroybootstrap.Destroy(ctx, command.RootOpts.Dir)
        if err != nil {
            return 0, err
        }
@@ -1,6 +1,7 @@
package main

import (
+   "context"
    "os"
    "path/filepath"
@@ -125,7 +126,7 @@ func newDestroyBootstrapCmd() *cobra.Command {
            defer cleanup()

            timer.StartTimer(timer.TotalTimeElapsed)
-           err := bootstrap.Destroy(command.RootOpts.Dir)
+           err := bootstrap.Destroy(context.TODO(), command.RootOpts.Dir)
            if err != nil {
                logrus.Fatal(err)
            }
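The hunks above thread a context.Context into destroybootstrap.Destroy: the create path forwards the command's own ctx, while the standalone destroy-bootstrap command stubs in context.TODO() until its cobra handler gains one. A minimal, self-contained sketch of this plumbing pattern (the destroy function here is a stand-in, not the installer's):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // destroy stands in for destroybootstrap.Destroy: once it accepts a
    // context, callers can cancel long-running cloud deletions.
    func destroy(ctx context.Context, dir string) error {
        select {
        case <-time.After(50 * time.Millisecond): // pretend work
            return nil
        case <-ctx.Done():
            return fmt.Errorf("destroy %s: %w", dir, ctx.Err())
        }
    }

    func main() {
        // Caller with a real context (the create command's path).
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        fmt.Println(destroy(ctx, "./assets"))

        // Caller without one yet (the standalone command) uses context.TODO()
        // as an explicit marker that plumbing is still incomplete.
        fmt.Println(destroy(context.TODO(), "./assets"))
    }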
@@ -5,17 +5,29 @@ import (
    "fmt"
    "path/filepath"
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "gopkg.in/yaml.v2"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/util/wait"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    utilkubeconfig "sigs.k8s.io/cluster-api/util/kubeconfig"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/openshift/installer/pkg/asset"
    "github.com/openshift/installer/pkg/asset/cluster/aws"
    "github.com/openshift/installer/pkg/asset/cluster/azure"
    "github.com/openshift/installer/pkg/asset/cluster/openstack"
    "github.com/openshift/installer/pkg/asset/installconfig"
    awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws"
    "github.com/openshift/installer/pkg/asset/kubeconfig"
    "github.com/openshift/installer/pkg/asset/manifests/capiutils"
    capimanifests "github.com/openshift/installer/pkg/asset/manifests/clusterapi"
    "github.com/openshift/installer/pkg/asset/password"
    "github.com/openshift/installer/pkg/asset/quota"
    "github.com/openshift/installer/pkg/clusterapi"
    infra "github.com/openshift/installer/pkg/infrastructure/platform"
    typesaws "github.com/openshift/installer/pkg/types/aws"
    typesazure "github.com/openshift/installer/pkg/types/azure"
@@ -56,6 +68,8 @@ func (c *Cluster) Dependencies() []asset.Asset {
|
||||
"a.PlatformQuotaCheck{},
|
||||
&TerraformVariables{},
|
||||
&password.KubeadminPassword{},
|
||||
&capimanifests.Cluster{},
|
||||
&kubeconfig.AdminClient{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,6 +96,16 @@ func (c *Cluster) Generate(parents asset.Parents) (err error) {
        return errors.New("cluster cannot be created with bootstrapInPlace set")
    }

    // Check if we're using Cluster API.
    if capiutils.IsEnabled(installConfig) {
        return c.provisionWithClusterAPI(context.TODO(), parents, installConfig, clusterID)
    }

    // Otherwise, use the normal path.
    return c.provision(installConfig, clusterID, terraformVariables)
}

func (c *Cluster) provision(installConfig *installconfig.InstallConfig, clusterID *installconfig.ClusterID, terraformVariables *TerraformVariables) error {
    platform := installConfig.Config.Platform.Name()

    if azure := installConfig.Config.Platform.Azure; azure != nil && azure.CloudName == typesazure.StackCloud {
@@ -124,6 +148,137 @@ func (c *Cluster) Generate(parents asset.Parents) (err error) {
    return nil
}

func (c *Cluster) provisionWithClusterAPI(ctx context.Context, parents asset.Parents, installConfig *installconfig.InstallConfig, clusterID *installconfig.ClusterID) error {
    capiManifests := &capimanifests.Cluster{}
    clusterKubeconfigAsset := &kubeconfig.AdminClient{}
    parents.Get(
        capiManifests,
        clusterKubeconfigAsset,
    )

    // Only need the objects--not the files.
    manifests := []client.Object{}
    for _, m := range capiManifests.RuntimeFiles() {
        manifests = append(manifests, m.Object)
    }

    // Run the CAPI system.
    capiSystem := clusterapi.System()
    if err := capiSystem.Run(ctx, installConfig); err != nil {
        return fmt.Errorf("failed to run cluster api system: %w", err)
    }

    // Grab the client.
    cl := capiSystem.Client()

    // Create all the manifests and store them.
    for _, m := range manifests {
        m.SetNamespace(capiutils.Namespace)
        if err := cl.Create(context.Background(), m); err != nil {
            return fmt.Errorf("failed to create manifest: %w", err)
        }
        logrus.Infof("Created manifest %+T, namespace=%s name=%s", m, m.GetNamespace(), m.GetName())
    }

    // Pass the cluster kubeconfig and store it in a secret; this is usually the role of a bootstrap provider.
    {
        key := client.ObjectKey{
            Name:      clusterID.InfraID,
            Namespace: capiutils.Namespace,
        }
        cluster := &clusterv1.Cluster{}
        if err := cl.Get(context.Background(), key, cluster); err != nil {
            return err
        }
        // Create the secret.
        clusterKubeconfig := clusterKubeconfigAsset.Files()[0].Data
        secret := utilkubeconfig.GenerateSecret(cluster, clusterKubeconfig)
        if err := cl.Create(context.Background(), secret); err != nil {
            return err
        }
    }

    // Wait for the load balancer to be ready by checking the control plane endpoint
    // on the cluster object.
    var cluster *clusterv1.Cluster
    {
        if err := wait.ExponentialBackoff(wait.Backoff{
            Duration: time.Second * 10,
            Factor:   float64(1.5),
            Steps:    32,
        }, func() (bool, error) {
            c := &clusterv1.Cluster{}
            if err := cl.Get(context.Background(), client.ObjectKey{
                Name:      clusterID.InfraID,
                Namespace: capiutils.Namespace,
            }, c); err != nil {
                if apierrors.IsNotFound(err) {
                    return false, nil
                }
                return false, err
            }
            cluster = c
            return cluster.Spec.ControlPlaneEndpoint.IsValid(), nil
        }); err != nil {
            return err
        }
        if cluster == nil {
            return errors.New("error occurred during load balancer ready check")
        }
        if cluster.Spec.ControlPlaneEndpoint.Host == "" {
            return errors.New("control plane endpoint is not set")
        }
    }

    // Run the post-provisioning steps for the platform we're on.
    // TODO(vincepri): The following should probably be in a separate package with a clear
    // interface and multiple hooks at different stages of the cluster lifecycle.
    switch installConfig.Config.Platform.Name() {
    case typesaws.Name:
        ssn, err := installConfig.AWS.Session(context.TODO())
        if err != nil {
            return fmt.Errorf("failed to create session: %w", err)
        }
        client := awsconfig.NewClient(ssn)
        r53cfg := awsconfig.GetR53ClientCfg(ssn, "")
        err = client.CreateOrUpdateRecord(installConfig.Config, cluster.Spec.ControlPlaneEndpoint.Host, r53cfg)
        if err != nil {
            return fmt.Errorf("failed to create route53 records: %w", err)
        }
        logrus.Infof("Created Route53 records to control plane load balancer.")
    default:
    }

    // For each manifest we created, retrieve it and store it in the asset.
    for _, m := range manifests {
        key := client.ObjectKey{
            Name:      m.GetName(),
            Namespace: m.GetNamespace(),
        }
        if err := cl.Get(context.Background(), key, m); err != nil {
            return fmt.Errorf("failed to get manifest: %w", err)
        }

        gvk, err := cl.GroupVersionKindFor(m)
        if err != nil {
            return fmt.Errorf("failed to get GVK for manifest: %w", err)
        }
        fileName := fmt.Sprintf("%s-%s-%s.yaml", gvk.Kind, m.GetNamespace(), m.GetName())
        objData, err := yaml.Marshal(m)
        if err != nil {
            errMsg := fmt.Sprintf("failed to create infrastructure manifest %s from InstallConfig", fileName)
            return errors.Wrapf(err, errMsg)
        }
        c.FileList = append(c.FileList, &asset.File{
            Filename: fileName,
            Data:     objData,
        })
    }

    logrus.Infof("Cluster API resources have been created. Waiting for cluster to become ready...")
    return nil
}

// Files returns the FileList generated by the asset.
func (c *Cluster) Files() []*asset.File {
    return c.FileList
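The readiness loop above polls the Cluster object until Spec.ControlPlaneEndpoint is populated, using apimachinery's wait.ExponentialBackoff with Duration 10s, Factor 1.5, and Steps 32 — sleeps of 10s, 15s, 22.5s, and so on between at most 32 attempts, growing without bound since no Cap is set. A standalone sketch of those semantics (durations scaled down, and the condition function is a stand-in for "endpoint is valid"):

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        attempts := 0
        backoff := wait.Backoff{
            Duration: 10 * time.Millisecond, // scaled down from 10s for the demo
            Factor:   1.5,
            Steps:    32,
        }
        // ExponentialBackoff retries the condition until it returns true,
        // returns an error, or Steps is exhausted.
        err := wait.ExponentialBackoff(backoff, func() (bool, error) {
            attempts++
            return attempts >= 5, nil // stand-in for the endpoint check
        })
        fmt.Println(attempts, err)
    }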
@@ -6,6 +6,7 @@ import (

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+   "github.com/aws/aws-sdk-go/aws/endpoints"
    awss "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/route53"
    "github.com/pkg/errors"
@@ -146,3 +147,73 @@ func GetR53ClientCfg(sess *awss.Session, roleARN string) *aws.Config {
    creds := stscreds.NewCredentials(sess, roleARN)
    return &aws.Config{Credentials: creds}
}

// CreateOrUpdateRecord creates or updates the Route53 records for the cluster endpoint.
func (c *Client) CreateOrUpdateRecord(ic *types.InstallConfig, target string, cfg *aws.Config) error {
    zone, err := c.GetBaseDomain(ic.BaseDomain)
    if err != nil {
        return err
    }

    params := &route53.ChangeResourceRecordSetsInput{
        ChangeBatch: &route53.ChangeBatch{
            Comment: aws.String(fmt.Sprintf("Creating record for api and api-int in domain %s", ic.ClusterDomain())),
        },
        HostedZoneId: zone.Id,
    }
    for _, prefix := range []string{"api", "api-int"} {
        params.ChangeBatch.Changes = append(params.ChangeBatch.Changes, &route53.Change{
            Action: aws.String("UPSERT"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: aws.String(fmt.Sprintf("%s.%s.", prefix, ic.ClusterDomain())),
                Type: aws.String("A"),
                AliasTarget: &route53.AliasTarget{
                    DNSName:              aws.String(target),
                    HostedZoneId:         aws.String(hostedZoneIDPerRegionNLBMap[ic.AWS.Region]),
                    EvaluateTargetHealth: aws.Bool(true),
                },
            },
        })
    }
    svc := route53.New(c.ssn, cfg)
    if _, err := svc.ChangeResourceRecordSets(params); err != nil {
        return fmt.Errorf("failed to create records for api/api-int: %w", err)
    }
    return nil
}

// hostedZoneIDPerRegionNLBMap maps each AWS region to the hosted zone ID of
// its network load balancers.
// See https://docs.aws.amazon.com/general/latest/gr/elb.html#elb_region
var hostedZoneIDPerRegionNLBMap = map[string]string{
    endpoints.AfSouth1RegionID:     "Z203XCE67M25HM",
    endpoints.ApEast1RegionID:      "Z12Y7K3UBGUAD1",
    endpoints.ApNortheast1RegionID: "Z31USIVHYNEOWT",
    endpoints.ApNortheast2RegionID: "ZIBE1TIR4HY56",
    endpoints.ApNortheast3RegionID: "Z1GWIQ4HH19I5X",
    endpoints.ApSouth1RegionID:     "ZVDDRBQ08TROA",
    endpoints.ApSouth2RegionID:     "Z0711778386UTO08407HT",
    endpoints.ApSoutheast1RegionID: "ZKVM4W9LS7TM",
    endpoints.ApSoutheast2RegionID: "ZCT6FZBF4DROD",
    endpoints.ApSoutheast3RegionID: "Z01971771FYVNCOVWJU1G",
    endpoints.ApSoutheast4RegionID: "Z01156963G8MIIL7X90IV",
    endpoints.CaCentral1RegionID:   "Z2EPGBW3API2WT",
    endpoints.CnNorth1RegionID:     "Z3QFB96KMJ7ED6",
    endpoints.CnNorthwest1RegionID: "ZQEIKTCZ8352D",
    endpoints.EuCentral1RegionID:   "Z3F0SRJ5LGBH90",
    endpoints.EuCentral2RegionID:   "Z02239872DOALSIDCX66S",
    endpoints.EuNorth1RegionID:     "Z1UDT6IFJ4EJM",
    endpoints.EuSouth1RegionID:     "Z23146JA1KNAFP",
    endpoints.EuSouth2RegionID:     "Z1011216NVTVYADP1SSV",
    endpoints.EuWest1RegionID:      "Z2IFOLAFXWLO4F",
    endpoints.EuWest2RegionID:      "ZD4D7Y8KGAS4G",
    endpoints.EuWest3RegionID:      "Z1CMS0P5QUZ6D5",
    endpoints.MeCentral1RegionID:   "Z00282643NTTLPANJJG2P",
    endpoints.MeSouth1RegionID:     "Z3QSRYVP46NYYV",
    endpoints.SaEast1RegionID:      "ZTK26PT1VY4CU",
    endpoints.UsEast1RegionID:      "Z26RNL4JYFTOTI",
    endpoints.UsEast2RegionID:      "ZLMOA37VPKANP",
    endpoints.UsGovEast1RegionID:   "Z1ZSMQQ6Q24QQ8",
    endpoints.UsGovWest1RegionID:   "ZMG1MZ2THAWF1",
    endpoints.UsWest1RegionID:      "Z24FKFUX50B4VW",
    endpoints.UsWest2RegionID:      "Z18D5FSROUN65G",
}
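An alias A record that points at a network load balancer must carry the load balancer's region-specific canonical hosted zone ID (the table above, taken from the linked AWS documentation), not the cluster's own zone ID — which is why the map exists at all. A minimal sketch of building one such change; the names, target, and zone ID are illustrative, only the aws-sdk-go route53 types mirror the code above:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/route53"
    )

    // upsertAlias builds the UPSERT for one record; name, target, and
    // nlbZoneID are illustrative values, not taken from the installer.
    func upsertAlias(name, target, nlbZoneID string) *route53.Change {
        return &route53.Change{
            Action: aws.String("UPSERT"),
            ResourceRecordSet: &route53.ResourceRecordSet{
                Name: aws.String(name),
                Type: aws.String("A"),
                AliasTarget: &route53.AliasTarget{
                    DNSName:              aws.String(target),
                    HostedZoneId:         aws.String(nlbZoneID), // the NLB's zone, per the map above
                    EvaluateTargetHealth: aws.Bool(true),
                },
            },
        }
    }

    func main() {
        c := upsertAlias("api.example.cluster.dev.",
            "my-nlb-0123.elb.us-east-1.amazonaws.com", "Z26RNL4JYFTOTI")
        fmt.Println(c.String())
    }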
@@ -108,7 +108,7 @@ func GenerateClusterAPI(ctx context.Context, installConfig *installconfig.Instal
    // delete the machine when the stage is complete.
    bootstrapAWSMachine := &capa.AWSMachine{
        ObjectMeta: metav1.ObjectMeta{
-           Name: fmt.Sprintf("%s-%s-bootstrap", clusterID.InfraID, pool.Name),
+           Name: capiutils.GenerateBoostrapMachineName(clusterID.InfraID),
            Labels: map[string]string{
                "cluster.x-k8s.io/control-plane": "",
                "install.openshift.io/bootstrap": "",
@@ -23,3 +23,9 @@ func CIDRFromInstallConfig(installConfig *installconfig.InstallConfig) *ipnet.IP
func IsEnabled(installConfig *installconfig.InstallConfig) bool {
    return installConfig.Config.EnabledFeatureGates().Enabled(v1.FeatureGateClusterAPIInstall)
}

// GenerateBoostrapMachineName generates the name of the Cluster API machine
// used for bootstrapping from the cluster's infrastructure ID.
func GenerateBoostrapMachineName(infraID string) string {
    return infraID + "-bootstrap"
}
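This helper is shared between manifest generation (the AWSMachine hunk above) and the bootstrap destroyer (further below), so both sides derive the same machine name. A tiny self-contained sketch, with the helper copied locally and a made-up infra ID:

    package main

    import "fmt"

    // generateBoostrapMachineName mirrors the helper above (copied here so
    // the sketch is self-contained).
    func generateBoostrapMachineName(infraID string) string {
        return infraID + "-bootstrap"
    }

    func main() {
        // Create and destroy must agree on this name, or the delete in
        // destroyBoostrapMachine would silently miss the machine.
        fmt.Println(generateBoostrapMachineName("mycluster-x7k2p"))
    }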
@@ -86,7 +86,7 @@ func (c *Cluster) Generate(dependencies asset.Parents) error {
            Name: capiutils.Namespace,
        },
    }
-   c.FileList = append(c.FileList, &asset.RuntimeFile{Object: namespace, File: asset.File{Filename: "00_capi-namespace.yaml"}})
+   c.FileList = append(c.FileList, &asset.RuntimeFile{Object: namespace, File: asset.File{Filename: "000_capi-namespace.yaml"}})

    cluster := &clusterv1.Cluster{
        ObjectMeta: metav1.ObjectMeta{
@@ -56,10 +56,10 @@ func (c *localControlPlane) Run(ctx context.Context) error {
    // Create a temporary directory to unpack the cluster-api binaries.
    c.BinDir = filepath.Join(command.RootOpts.Dir, "bin", "cluster-api")
    if err := UnpackClusterAPIBinary(c.BinDir); err != nil {
-       return err
+       return fmt.Errorf("failed to unpack cluster-api binary: %w", err)
    }
    if err := UnpackEnvtestBinaries(c.BinDir); err != nil {
-       return err
+       return fmt.Errorf("failed to unpack envtest binaries: %w", err)
    }

    log.SetLogger(klog.NewKlogr())
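The recurring return err → fmt.Errorf("...: %w", err) changes in this commit add the failing step to the message while keeping the original error inspectable through errors.Is and errors.As. A quick standard-library illustration (the unpack function is a stand-in):

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
    )

    func unpack(dir string) error {
        if _, err := os.Stat(dir); err != nil {
            // %w keeps the underlying error inspectable by callers.
            return fmt.Errorf("failed to unpack cluster-api binary: %w", err)
        }
        return nil
    }

    func main() {
        err := unpack("/does/not/exist")
        fmt.Println(err)                            // wrapped message with context
        fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: the cause survives wrapping
    }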
@@ -100,7 +100,7 @@ func (p Provider) Extract(dir string) error {
    }

    // Ensure the directory exists.
-   logrus.Debugf("creating %s directory", dir)
+   logrus.Debugf("Creating %s directory", dir)
    if err := os.MkdirAll(dir, 0o777); err != nil {
        return errors.Wrapf(err, "could not make directory for the %s provider", p.Name)
    }
@@ -115,7 +115,7 @@ func (p Provider) Extract(dir string) error {
    if err != nil {
        return errors.Wrapf(err, "failed to sanitize archive file %q", name)
    }
-   logrus.Debugf("extracting %s file", path)
+   logrus.Debugf("Extracting %s file", path)
    if err := unpackFile(f, path); err != nil {
        return errors.Wrapf(err, "failed to extract %q", path)
    }
@@ -32,9 +32,20 @@ var (
    sys = &system{}
)

// SystemState is the state of the cluster-api system.
type SystemState string

const (
    // SystemStateRunning indicates the system is running.
    SystemStateRunning SystemState = "running"
    // SystemStateStopped indicates the system is stopped.
    SystemStateStopped SystemState = "stopped"
)

// Interface is the interface for the cluster-api system.
type Interface interface {
    Run(ctx context.Context, installConfig *installconfig.InstallConfig) error
    State() SystemState
    Client() client.Client
    Teardown()
}
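System() hands out the package-level singleton (sys, declared above), so the create command that started the local control plane and a later destroy-bootstrap step in the same process observe the same SystemState. A consumer-side sketch, assuming the pkg/clusterapi API exactly as shown in this hunk:

    package main

    import (
        "fmt"

        "github.com/openshift/installer/pkg/clusterapi"
    )

    func main() {
        // System() returns the package-level singleton, so any command in
        // the same process sees the control plane started by an earlier step.
        sys := clusterapi.System()
        if sys.State() == clusterapi.SystemStateRunning {
            fmt.Println("reusing in-process CAPI control plane")
            _ = sys.Client()
            return
        }
        fmt.Println("CAPI system stopped; falling back to other destroy paths")
    }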
@@ -47,6 +58,8 @@ func System() Interface {
// system creates a local capi control plane
// to use as a management cluster.
type system struct {
    sync.Mutex

    client client.Client

    componentDir string
@@ -59,25 +72,29 @@ type system struct {

// Run launches the cluster-api system.
func (c *system) Run(ctx context.Context, installConfig *installconfig.InstallConfig) error {
    c.Lock()
    defer c.Unlock()

    // Setup the context with a cancel function.
    ctx, cancel := context.WithCancel(ctx)
    c.cancel = cancel

    // Create the local control plane.
-   c.lcp = &localControlPlane{}
-   if err := c.lcp.Run(ctx); err != nil {
+   lcp := &localControlPlane{}
+   if err := lcp.Run(ctx); err != nil {
        return fmt.Errorf("failed to run local control plane: %w", err)
    }
+   c.lcp = lcp
    c.client = c.lcp.Client

    // Create a temporary directory to unpack the cluster-api assets
    // and use it as the working directory for the envtest environment.
    componentDir, err := os.MkdirTemp("", "openshift-cluster-api-system-components")
    if err != nil {
-       return err
+       return fmt.Errorf("failed to create temporary folder for cluster api components: %w", err)
    }
    if err := data.Unpack(componentDir, "/cluster-api"); err != nil {
-       return err
+       return fmt.Errorf("failed to unpack cluster api components: %w", err)
    }
    c.componentDir = componentDir
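Note the subtle fix in Run above: the local control plane is now built in a local lcp variable and assigned to c.lcp only after its Run succeeds, so a failed startup no longer leaves the system reporting itself as running — State(), in the hunk just below, keys off c.lcp being non-nil. A generic sketch of this publish-on-success pattern (names are illustrative):

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    type server struct{}

    func start() (*server, error) { return nil, errors.New("boom") }

    type system struct {
        sync.Mutex
        srv *server // nil means "stopped"
    }

    func (s *system) Run() error {
        s.Lock()
        defer s.Unlock()
        srv, err := start()
        if err != nil {
            // s.srv stays nil, so a State() check keeps reporting "stopped".
            return fmt.Errorf("failed to start: %w", err)
        }
        s.srv = srv // publish only after success
        return nil
    }

    func main() {
        s := &system{}
        fmt.Println(s.Run(), s.srv == nil) // a failed start leaves srv nil
    }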
@@ -202,11 +219,17 @@ func (c *system) Run(ctx context.Context, installConfig *installconfig.InstallCo

// Client returns the client for the local control plane.
func (c *system) Client() client.Client {
    c.Lock()
    defer c.Unlock()

    return c.client
}

// Teardown shuts down the local capi control plane and all its controllers.
func (c *system) Teardown() {
    c.Lock()
    defer c.Unlock()

    if c.lcp == nil {
        return
    }
@@ -232,6 +255,17 @@ func (c *system) Teardown() {
    })
}

// State returns the state of the cluster-api system.
func (c *system) State() SystemState {
    c.Lock()
    defer c.Unlock()

    if c.lcp == nil {
        return SystemStateStopped
    }
    return SystemStateRunning
}

// getInfrastructureController returns a controller for the given provider;
// most of the configuration is by convention.
//
@@ -2,19 +2,26 @@
package bootstrap

import (
    "context"
    "fmt"
    "os"
    "path/filepath"

    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    configv1 "github.com/openshift/api/config/v1"
    "github.com/openshift/installer/pkg/asset/cluster"
    openstackasset "github.com/openshift/installer/pkg/asset/cluster/openstack"
    "github.com/openshift/installer/pkg/asset/manifests/capiutils"
    "github.com/openshift/installer/pkg/clusterapi"
    osp "github.com/openshift/installer/pkg/destroy/openstack"
    infra "github.com/openshift/installer/pkg/infrastructure/platform"
    ibmcloudtfvars "github.com/openshift/installer/pkg/tfvars/ibmcloud"
    "github.com/openshift/installer/pkg/types"
    typesazure "github.com/openshift/installer/pkg/types/azure"
    "github.com/openshift/installer/pkg/types/featuregates"
    ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud"
@@ -22,12 +29,16 @@ import (
)

// Destroy uses Terraform to remove bootstrap resources.
-func Destroy(dir string) (err error) {
+func Destroy(ctx context.Context, dir string) (err error) {
    metadata, err := cluster.LoadMetadata(dir)
    if err != nil {
        return err
    }

    if sys := clusterapi.System(); sys.State() == clusterapi.SystemStateRunning {
        return destroyBoostrapMachine(ctx, sys.Client(), metadata)
    }

    platform := metadata.Platform()
    if platform == "" {
        return errors.New("no platform configured in metadata")
@@ -83,3 +94,15 @@ func Destroy(dir string) (err error) {

    return nil
}

func destroyBoostrapMachine(ctx context.Context, c client.Client, metadata *types.ClusterMetadata) error {
    if err := c.Delete(ctx, &clusterv1.Machine{
        ObjectMeta: metav1.ObjectMeta{
            Name:      capiutils.GenerateBoostrapMachineName(metadata.InfraID),
            Namespace: capiutils.Namespace,
        },
    }); client.IgnoreNotFound(err) != nil {
        return fmt.Errorf("failed to delete bootstrap machine: %w", err)
    }
    return nil
}
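Wrapping the delete in client.IgnoreNotFound makes bootstrap destruction idempotent: a second invocation, or a machine that was never created, is not treated as a failure. A small standalone illustration of that controller-runtime helper (the group/resource and name are illustrative):

    package main

    import (
        "errors"
        "fmt"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    func main() {
        gr := schema.GroupResource{Group: "cluster.x-k8s.io", Resource: "machines"}
        notFound := apierrors.NewNotFound(gr, "mycluster-x7k2p-bootstrap")

        // IgnoreNotFound turns only NotFound into nil; real failures pass through.
        fmt.Println(client.IgnoreNotFound(notFound))           // <nil>
        fmt.Println(client.IgnoreNotFound(errors.New("boom"))) // boom
    }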