Mirror of https://github.com/openshift/installer.git (synced 2026-02-05 15:47:14 +01:00)
cmd/openshift-install/waitfor: Rename from user-provided-infrastructure
There is no hard line between installer- and user-provided infrastructure. Rename these commands to focus on what they'll do instead of the workflow into which we expect them to fit.

We're still working out how we can drop the router-CA injection to avoid 'wait-for cluster-ready' surprising users by editing their on-disk kubeconfig [1]. But that's mitigated somewhat by the fact that addRouterCAToClusterCA is idempotent, because AppendCertsFromPEM wraps AddCert [2] and AddCert checks to avoid duplicate certificates [3].

[1]: https://github.com/openshift/installer/pull/1541
[2]: https://github.com/golang/go/blob/go1.12/src/crypto/x509/cert_pool.go#L144
[3]: https://github.com/golang/go/blob/go1.12/src/crypto/x509/cert_pool.go#L106-L109
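The idempotency argument above can be checked in isolation. The following is a minimal, self-contained sketch (not installer code): it generates a throwaway self-signed CA (the name "example-router-ca" and the variable names are invented for illustration) and shows that appending the same PEM to an x509.CertPool twice does not duplicate the certificate, because AppendCertsFromPEM hands each parsed certificate to AddCert, which skips certificates already in the pool.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Generate a throwaway self-signed CA certificate, purely for illustration.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example-router-ca"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	caPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})

	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM) // first append adds the certificate
	pool.AppendCertsFromPEM(caPEM) // second append is a no-op: AddCert sees a duplicate and returns

	fmt.Println(len(pool.Subjects())) // prints 1, not 2
}
```

So even if 'wait-for cluster-ready' runs more than once against the same kubeconfig, re-injecting the same router CA should not grow the certificate pool.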
@@ -105,7 +105,7 @@ var (
 			logrus.Fatal(err)
 		}
 
-		err = finish(ctx, config, rootOpts.dir)
+		err = waitForClusterReady(ctx, config, rootOpts.dir)
 		if err != nil {
 			logrus.Fatal(err)
 		}
@@ -433,7 +433,7 @@ func logComplete(directory, consoleURL string) error {
 	return nil
 }
 
-func finish(ctx context.Context, config *rest.Config, directory string) error {
+func waitForClusterReady(ctx context.Context, config *rest.Config, directory string) error {
 	if err := waitForInitializedCluster(ctx, config); err != nil {
 		return err
 	}

@@ -46,7 +46,7 @@ func installerMain() {
 	for _, subCmd := range []*cobra.Command{
 		newCreateCmd(),
 		newDestroyCmd(),
-		newUPICmd(),
+		newWaitForCmd(),
 		newVersionCmd(),
 		newGraphCmd(),
 		newCompletionCmd(),
@@ -1,98 +0,0 @@
-package main
-
-import (
-	"context"
-	"path/filepath"
-
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"github.com/spf13/cobra"
-	"k8s.io/client-go/tools/clientcmd"
-)
-
-var (
-	upiLong = `Entry-points for user-provided infrastructure.
-
-Most users will want to use 'create cluster' to have the installer
-create the required infrastructure for their cluster. But in some
-installations the infrastructure needs to be adapted in ways that
-installer-created infrastructure does not support. This command
-provides entry points to support the following workflow:
-
-1. Call 'create ignition-configs' to create the bootstrap Ignition
-   config and admin kubeconfig.
-2. Creates all required cluster resources, after which the cluster
-   will begin bootstrapping.
-3. Call 'user-provided-infrastructure bootstrap-complete' to wait
-   until the bootstrap phase has completed.
-4. Destroy the bootstrap resources.
-5. Call 'user-provided-infrastructure finish' to wait until the
-   cluster finishes deploying its initial version. This also
-   retrieves the router certificate authority from the cluster and
-   inserts it into the admin kubeconfig.`
-)
-
-func newUPICmd() *cobra.Command {
-	cmd := &cobra.Command{
-		Use:     "user-provided-infrastructure",
-		Aliases: []string{"upi"},
-		Short:   "Entry-points for user-provided infrastructure",
-		Long:    upiLong,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return cmd.Help()
-		},
-	}
-	cmd.AddCommand(newUPIBootstrapCompleteCmd())
-	cmd.AddCommand(newUPIFinishCmd())
-	return cmd
-}
-
-func newUPIBootstrapCompleteCmd() *cobra.Command {
-	return &cobra.Command{
-		Use:   "bootstrap-complete",
-		Short: "Wait until cluster bootstrapping has completed",
-		Args:  cobra.ExactArgs(0),
-		Run: func(_ *cobra.Command, _ []string) {
-			ctx := context.Background()
-
-			cleanup := setupFileHook(rootOpts.dir)
-			defer cleanup()
-
-			config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(rootOpts.dir, "auth", "kubeconfig"))
-			if err != nil {
-				logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
-			}
-
-			err = waitForBootstrapComplete(ctx, config, rootOpts.dir)
-			if err != nil {
-				logrus.Fatal(err)
-			}
-
-			logrus.Info("It is now safe to remove the bootstrap resources")
-		},
-	}
-}
-
-func newUPIFinishCmd() *cobra.Command {
-	return &cobra.Command{
-		Use:   "finish",
-		Short: "Wait for the cluster to finish updating and update local resources",
-		Args:  cobra.ExactArgs(0),
-		Run: func(cmd *cobra.Command, args []string) {
-			ctx := context.Background()
-
-			cleanup := setupFileHook(rootOpts.dir)
-			defer cleanup()
-
-			config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(rootOpts.dir, "auth", "kubeconfig"))
-			if err != nil {
-				logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
-			}
-
-			err = finish(ctx, config, rootOpts.dir)
-			if err != nil {
-				logrus.Fatal(err)
-			}
-		},
-	}
-}
cmd/openshift-install/waitfor.go (new file, 79 lines)
@@ -0,0 +1,79 @@
+package main
+
+import (
+	"context"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+func newWaitForCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "wait-for",
+		Short: "Wait for install-time events",
+		Long: `Wait for install-time events.
+
+'create cluster' has a few stages that wait for cluster events. But
+these waits can also be useful on their own. This subcommand exposes
+them directly.`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return cmd.Help()
+		},
+	}
+	cmd.AddCommand(newWaitForBootstrapCompleteCmd())
+	cmd.AddCommand(newWaitForClusterReadyCmd())
+	return cmd
+}
+
+func newWaitForBootstrapCompleteCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   "bootstrap-complete",
+		Short: "Wait until cluster bootstrapping has completed",
+		Args:  cobra.ExactArgs(0),
+		Run: func(_ *cobra.Command, _ []string) {
+			ctx := context.Background()
+
+			cleanup := setupFileHook(rootOpts.dir)
+			defer cleanup()
+
+			config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(rootOpts.dir, "auth", "kubeconfig"))
+			if err != nil {
+				logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
+			}
+
+			err = waitForBootstrapComplete(ctx, config, rootOpts.dir)
+			if err != nil {
+				logrus.Fatal(err)
+			}
+
+			logrus.Info("It is now safe to remove the bootstrap resources")
+		},
+	}
+}
+
+func newWaitForClusterReadyCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   "cluster-ready",
+		Short: "Wait until the cluster is ready",
+		Args:  cobra.ExactArgs(0),
+		Run: func(cmd *cobra.Command, args []string) {
+			ctx := context.Background()
+
+			cleanup := setupFileHook(rootOpts.dir)
+			defer cleanup()
+
+			config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(rootOpts.dir, "auth", "kubeconfig"))
+			if err != nil {
+				logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
+			}
+
+			err = waitForClusterReady(ctx, config, rootOpts.dir)
+			if err != nil {
+				logrus.Fatal(err)
+			}
+		},
+	}
+}
@@ -84,7 +84,7 @@ and load balancer configuration.
 ## Monitor for `bootstrap-complete` and Initialization
 
 ```console
-$ bin/openshift-install user-provided-infrastructure bootstrap-complete
+$ bin/openshift-install wait-for bootstrap-complete
 INFO Waiting up to 30m0s for the Kubernetes API at https://api.test.example.com:6443...
 INFO API v1.12.4+c53f462 up
 INFO Waiting up to 30m0s for the bootstrap-complete event...
@@ -248,7 +248,7 @@ TODO: Identify changes needed to Router or Ingress for DNS `*.apps` registration
 ## Monitor for Cluster Completion
 
 ```console
-$ bin/openshift-install user-provided-infrastructure finish
+$ bin/openshift-install wait-for cluster-ready
 INFO Waiting up to 30m0s for the cluster to initialize...
 ```
 
@@ -202,10 +202,10 @@ TODO RHEL CoreOS does not have assets for bare-metal.
 
 ### Monitor for bootstrap-complete
 
-The administrators can use the `upi bootstrap-complete` target of the OpenShift Installer to monitor cluster bootstrapping. The command succeeds when it notices `bootstrap-complete` event from Kubernetes APIServer. This event is generated by the bootstrap machine after the Kubernetes APIServer has been bootstrapped on the control plane machines. For example,
+The administrators can use the `wait-for bootstrap-complete` target of the OpenShift Installer to monitor cluster bootstrapping. The command succeeds when it notices `bootstrap-complete` event from Kubernetes APIServer. This event is generated by the bootstrap machine after the Kubernetes APIServer has been bootstrapped on the control plane machines. For example,
 
 ```console
-$ openshift-install --dir test-bare-metal upi bootstrap-complete
+$ openshift-install --dir test-bare-metal wait-for bootstrap-complete
 INFO Waiting up to 30m0s for the Kubernetes API at https://api.test.example.com:6443...
 INFO API v1.12.4+c53f462 up
 INFO Waiting up to 30m0s for the bootstrap-complete event...
@@ -213,10 +213,10 @@ INFO Waiting up to 30m0s for the bootstrap-complete event...
 
 ## Monitor for cluster completion
 
-The administrators can use the `upi finish` target of the OpenShift Installer to monitor cluster completion. The command succeeds when it notices that Cluster Version Operator has completed rolling out the OpenShift cluster from Kubernetes APIServer.
+The administrators can use the `wait-for cluster-ready` target of the OpenShift Installer to monitor cluster completion. The command succeeds when it notices that Cluster Version Operator has completed rolling out the OpenShift cluster from Kubernetes APIServer.
 
 ```console
-$ openshift-install upi finish
+$ openshift-install wait-for cluster-ready
 INFO Waiting up to 30m0s for the cluster to initialize...
 ```