mirror of
https://github.com/rancher/cli.git
synced 2026-02-05 09:48:36 +01:00
Adding of new commands
This commit is contained in:
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.7.3
|
||||
FROM golang:1.9.3
|
||||
RUN apt-get update && \
|
||||
apt-get install -y xz-utils zip rsync
|
||||
RUN go get github.com/rancher/trash
|
||||
@@ -10,7 +10,6 @@ ENV DAPPER_SOURCE /go/src/github.com/rancher/cli
|
||||
ENV DAPPER_OUTPUT bin build/bin dist
|
||||
ENV DAPPER_DOCKER_SOCKET true
|
||||
ENV DAPPER_ENV TAG REPO GOOS CROSS
|
||||
ENV GO15VENDOREXPERIMENT 1
|
||||
ENV TRASH_CACHE ${DAPPER_SOURCE}/.trash-cache
|
||||
WORKDIR ${DAPPER_SOURCE}
|
||||
ENTRYPOINT ["./scripts/entry"]
|
||||
|
||||
33
README.md
33
README.md
@@ -1,34 +1,27 @@
|
||||
Rancher CLI
|
||||
===========
|
||||
|
||||
The Rancher Command Line Interface (CLI)is a unified tool to manage your Rancher server. With this tool, you can control your services, containers and hosts within a Rancher environment and automate them through scripts.
|
||||
The Rancher Command Line Interface (CLI) is a unified tool to interact with your Rancher server.
|
||||
|
||||
## Version Compatibility
|
||||
Rancher CLI v0.2.0+ is only compatible with Rancher Server v1.2.0+.
|
||||
|
||||
## Running
|
||||
## Installing
|
||||
|
||||
You can check the [releases page](https://github.com/rancher/cli/releases) for direct downloads of the binary or [build your own](#building).
|
||||
|
||||
## Setting up Rancher CLI with Rancher Server
|
||||
|
||||
To enable the CLI to connect to Rancher server, you can configure the environment variables needed. The environment variables that are required are `RANCHER_URL`, `RANCHER_ACCESS_KEY` and `RANCHER_SECRET_KEY`.
|
||||
|
||||
The access key and secret key should be an [account API key](http://docs.rancher.com/rancher/latest/en/api/api-keys/#account-api-keys). In your Rancher setup, you can create an account API key under the **API** tab and expand the **Advanced Options**.
|
||||
|
||||
You can run `rancher config` to set these environment variables for the CLI.
|
||||
The CLI needs to know your server address and the credentials required to authenticate with it.
|
||||
Rancher CLI will pull this information from a `cli.json` that is created the first time you run
|
||||
`rancher login`. By default this file is located at `~/.rancher/cli.json`.
|
||||
|
||||
```
|
||||
$ rancher --url http://<RANCHER_SERVER_URL> config
|
||||
URL [http://<RANCHER_SERVER_URL>]:
|
||||
Access Key [http://<RANCHER_SERVER_URL>]: <ACCESS_KEY>
|
||||
Secret Key [http://<RANCHER_SERVER_URL>]: <SECRET_KEY>
|
||||
INFO[0102] Saving config to /Users/<username>/.rancher/cli.json
|
||||
$ rancher login https://<RANCHER_SERVER_URL> -t my-secret-token --name CoolServer1
|
||||
```
|
||||
|
||||
> Note: The `<RANCHER_SERVER_URL>` includes whatever port was exposed when installing Rancher server. If you had followed the installation instructions, your URL would be `http://<server_ip>:8080/`.
|
||||
> Note: The `<RANCHER_SERVER_URL>` includes whatever port was exposed when installing Rancher server.
|
||||
|
||||
## Building
|
||||
If you want to use Rancher CLI on a server that uses a self signed cert you will need to download the cert from `<RANCHER_SERVER_URL>/v3/settings` and pass that into `rancher login` using `--cacert`
|
||||
|
||||
## Building from source
|
||||
|
||||
The binaries will be located in `/bin`.
|
||||
|
||||
@@ -40,9 +33,9 @@ Run `make`.
|
||||
|
||||
Run `CROSS=1 make build`
|
||||
|
||||
### Docker image
|
||||
## Docker image
|
||||
|
||||
Run `docker run --rm -it rancher/cli [ARGS]` You can pass in credentials by bind mounting in a config file or setting env vars. You can also use the wrapper script in `./contrib/rancher` that will make the process a bit easier.
|
||||
Run `docker run --rm -it rancher/cli [ARGS]` You can pass in credentials by bind mounting in a config file.
|
||||
|
||||
To build `rancher/cli` just run `make`. To use a custom Docker repository do `REPO=custom make` and it will producte `custom/cli` image.
|
||||
|
||||
@@ -54,7 +47,7 @@ For bugs, questions, comments, corrections, suggestions, etc., open an issue in
|
||||
Or just [click here](//github.com/rancher/rancher/issues/new?title=%5Bcli%5D%20) to create a new issue.
|
||||
|
||||
## License
|
||||
Copyright (c) 2014-2016 [Rancher Labs, Inc.](http://rancher.com)
|
||||
Copyright (c) 2014-2018 [Rancher Labs, Inc.](http://rancher.com)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
73
cliclient/cliclient.go
Normal file
73
cliclient/cliclient.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package cliclient
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/cli/config"
|
||||
|
||||
"github.com/rancher/norman/clientbase"
|
||||
clusterClient "github.com/rancher/types/client/cluster/v3"
|
||||
managementClient "github.com/rancher/types/client/management/v3"
|
||||
projectClient "github.com/rancher/types/client/project/v3"
|
||||
)
|
||||
|
||||
type MasterClient struct {
|
||||
ClusterClient *clusterClient.Client
|
||||
ManagementClient *managementClient.Client
|
||||
ProjectClient *projectClient.Client
|
||||
UserConfig *config.ServerConfig
|
||||
}
|
||||
|
||||
func NewMasterClient(config *config.ServerConfig) (*MasterClient, error) {
|
||||
mc := &MasterClient{
|
||||
UserConfig: config,
|
||||
}
|
||||
|
||||
clustProj := SplitOnColon(config.Project)
|
||||
|
||||
serverURL := config.URL
|
||||
|
||||
if !strings.HasSuffix(serverURL, "/v3") {
|
||||
serverURL = config.URL + "/v3"
|
||||
}
|
||||
|
||||
options := &clientbase.ClientOpts{
|
||||
URL: serverURL,
|
||||
AccessKey: config.AccessKey,
|
||||
SecretKey: config.SecretKey,
|
||||
CACerts: config.CACerts,
|
||||
}
|
||||
|
||||
// Setup the management client
|
||||
mClient, err := managementClient.NewClient(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mc.ManagementClient = mClient
|
||||
|
||||
// Setup the cluster client
|
||||
if len(clustProj) == 2 {
|
||||
options.URL = serverURL + "/clusters/" + clustProj[0]
|
||||
}
|
||||
cClient, err := clusterClient.NewClient(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mc.ClusterClient = cClient
|
||||
|
||||
// Setup the project client
|
||||
if len(clustProj) == 2 {
|
||||
options.URL = serverURL + "/projects/" + config.Project
|
||||
}
|
||||
pClient, err := projectClient.NewClient(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mc.ProjectClient = pClient
|
||||
|
||||
return mc, nil
|
||||
}
|
||||
|
||||
func SplitOnColon(s string) []string {
|
||||
return strings.Split(s, ":")
|
||||
}
|
||||
334
cmd/catalog.go
334
cmd/catalog.go
@@ -1,334 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/catalog"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
const (
|
||||
orchestrationSupported = "io.rancher.orchestration.supported"
|
||||
)
|
||||
|
||||
func CatalogCommand() cli.Command {
|
||||
catalogLsFlags := []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "quiet,q",
|
||||
Usage: "Only display IDs",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "'json' or Custom format: '{{.ID}} {{.Template.Id}}'",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "system,s",
|
||||
Usage: "Show system templates, not user",
|
||||
},
|
||||
}
|
||||
|
||||
return cli.Command{
|
||||
Name: "catalog",
|
||||
Usage: "Operations with catalogs",
|
||||
Action: defaultAction(catalogLs),
|
||||
Flags: catalogLsFlags,
|
||||
Subcommands: []cli.Command{
|
||||
cli.Command{
|
||||
Name: "ls",
|
||||
Usage: "List catalog templates",
|
||||
Description: "\nList all catalog templates in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher --env k8slab catalog ls\n",
|
||||
ArgsUsage: "None",
|
||||
Action: catalogLs,
|
||||
Flags: catalogLsFlags,
|
||||
},
|
||||
cli.Command{
|
||||
Name: "install",
|
||||
Usage: "Install catalog template",
|
||||
Description: "\nInstall a catalog template in the current $RANCHER_ENVIRONMENT. \nUse `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher --env k8slab catalog install <CATALOG_ID>\n",
|
||||
Action: catalogInstall,
|
||||
ArgsUsage: "[ID]...",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "answers,a",
|
||||
Usage: "Answer file",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
Usage: "Name of stack to create",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "system,s",
|
||||
Usage: "Install a system template",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type CatalogData struct {
|
||||
ID string
|
||||
Template catalog.Template
|
||||
Category string
|
||||
}
|
||||
|
||||
func catalogLs(ctx *cli.Context) error {
|
||||
writer := NewTableWriter([][]string{
|
||||
{"NAME", "Template.Name"},
|
||||
{"CATEGORY", "Category"},
|
||||
{"ID", "ID"},
|
||||
}, ctx)
|
||||
defer writer.Close()
|
||||
|
||||
err := forEachTemplate(ctx, func(item catalog.Template) error {
|
||||
writer.Write(CatalogData{
|
||||
ID: templateID(item),
|
||||
Template: item,
|
||||
Category: strings.Join(item.Categories, ","),
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writer.Err()
|
||||
}
|
||||
|
||||
func forEachTemplate(ctx *cli.Context, f func(item catalog.Template) error) error {
|
||||
_, c, _, cc, err := setupCatalogContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts, err := getListTemplatesOpts(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
collection, err := cc.Template.List(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, item := range collection.Data {
|
||||
if err := f(item); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func getListTemplatesOpts(ctx *cli.Context, c *client.RancherClient) (*catalog.ListOpts, error) {
|
||||
opts := &catalog.ListOpts{
|
||||
Filters: map[string]interface{}{},
|
||||
}
|
||||
setting, err := c.Setting.ById("rancher.server.version")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if setting != nil && setting.Value != "" {
|
||||
opts.Filters["rancherVersion"] = setting.Value
|
||||
}
|
||||
|
||||
opts.Filters["category_ne"] = "infra"
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
func setupCatalogContext(ctx *cli.Context) (Config, *client.RancherClient, *client.Project, *catalog.RancherClient, error) {
|
||||
config, err := lookupConfig(ctx)
|
||||
if err != nil {
|
||||
return config, nil, nil, nil, err
|
||||
}
|
||||
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return config, nil, nil, nil, err
|
||||
}
|
||||
|
||||
proj, err := GetEnvironment(config.Environment, c)
|
||||
if err != nil {
|
||||
return config, nil, nil, nil, err
|
||||
}
|
||||
|
||||
cc, err := GetCatalogClient(ctx)
|
||||
if err != nil {
|
||||
return config, nil, nil, nil, err
|
||||
}
|
||||
|
||||
return config, c, proj, cc, nil
|
||||
}
|
||||
|
||||
func templateNameAndVersion(name string) (string, string) {
|
||||
parts := strings.Split(name, ":")
|
||||
if len(parts) == 2 {
|
||||
return parts[0], parts[1]
|
||||
}
|
||||
return parts[0], ""
|
||||
}
|
||||
|
||||
func catalogInstall(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 1 {
|
||||
return errors.New("Exactly one argument is required")
|
||||
}
|
||||
|
||||
_, c, _, cc, err := setupCatalogContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
templateReference := ctx.Args()[0]
|
||||
name, version := templateNameAndVersion(templateReference)
|
||||
|
||||
template, err := getTemplate(ctx, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
templateVersion, err := getTemplateVersion(ctx, cc, template, name, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
answers, err := parseAnswers(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
answers, err = askQuestions(answers, templateVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stackName := ctx.String("name")
|
||||
if stackName == "" {
|
||||
stackName = strings.Title(strings.Split(name, "/")[1])
|
||||
}
|
||||
|
||||
externalID := fmt.Sprintf("catalog://%s", templateVersion.Id)
|
||||
id := ""
|
||||
stack, err := c.Stack.Create(&client.Stack{
|
||||
Name: stackName,
|
||||
Templates: templateVersion.Files,
|
||||
ExternalId: externalID,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
id = stack.Id
|
||||
|
||||
return WaitFor(ctx, id)
|
||||
}
|
||||
|
||||
func toString(s interface{}) string {
|
||||
if s == nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprint(s)
|
||||
}
|
||||
|
||||
func getTemplateVersion(ctx *cli.Context, cc *catalog.RancherClient, template catalog.Template, name, version string) (catalog.TemplateVersion, error) {
|
||||
templateVersion := catalog.TemplateVersion{}
|
||||
config, err := lookupConfig(ctx)
|
||||
if err != nil {
|
||||
return templateVersion, err
|
||||
}
|
||||
|
||||
if version == "" {
|
||||
version = template.DefaultVersion
|
||||
}
|
||||
|
||||
link, ok := template.VersionLinks[version]
|
||||
if !ok {
|
||||
fmt.Printf("%#v\n", template)
|
||||
return templateVersion, fmt.Errorf("Failed to find the version %s for template %s", version, name)
|
||||
}
|
||||
|
||||
client := &http.Client{}
|
||||
req, err := http.NewRequest("GET", fmt.Sprint(link), nil)
|
||||
req.SetBasicAuth(config.AccessKey, config.SecretKey)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return templateVersion, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return templateVersion, fmt.Errorf("Bad response %d looking up %s", resp.StatusCode, link)
|
||||
|
||||
}
|
||||
|
||||
err = json.NewDecoder(resp.Body).Decode(&templateVersion)
|
||||
return templateVersion, err
|
||||
}
|
||||
|
||||
func getTemplate(ctx *cli.Context, name string) (catalog.Template, error) {
|
||||
found := false
|
||||
foundTemplate := catalog.Template{}
|
||||
err := forEachTemplate(ctx, func(item catalog.Template) error {
|
||||
if found {
|
||||
return nil
|
||||
}
|
||||
|
||||
templateName, _ := templateNameAndVersion(templateID(item))
|
||||
if templateName == name {
|
||||
found = true
|
||||
foundTemplate = item
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if !found && err == nil {
|
||||
err = fmt.Errorf("Failed to find template %s", name)
|
||||
}
|
||||
return foundTemplate, err
|
||||
}
|
||||
|
||||
func templateID(template catalog.Template) string {
|
||||
parts := strings.SplitN(template.Id, ":", 2)
|
||||
if len(parts) != 2 {
|
||||
return template.Name
|
||||
}
|
||||
|
||||
first := parts[0]
|
||||
second := parts[1]
|
||||
version := template.DefaultVersion
|
||||
|
||||
parts = strings.SplitN(parts[1], "*", 2)
|
||||
if len(parts) == 2 {
|
||||
second = parts[1]
|
||||
}
|
||||
|
||||
if version == "" {
|
||||
return fmt.Sprintf("%s/%s", first, second)
|
||||
}
|
||||
return fmt.Sprintf("%s/%s:%s", first, second, version)
|
||||
}
|
||||
|
||||
func GetCatalogClient(ctx *cli.Context) (*catalog.RancherClient, error) {
|
||||
config, err := lookupConfig(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
idx := strings.LastIndex(config.URL, "/v3")
|
||||
if idx == -1 {
|
||||
idx = strings.LastIndex(config.URL, "/v1")
|
||||
if idx == -1 {
|
||||
return nil, fmt.Errorf("Invalid URL %s, must contain /v3", config.URL)
|
||||
}
|
||||
}
|
||||
|
||||
url := config.URL[:idx] + "/v1-catalog/schemas"
|
||||
return catalog.NewRancherClient(&catalog.ClientOpts{
|
||||
AccessKey: config.AccessKey,
|
||||
SecretKey: config.SecretKey,
|
||||
Url: url,
|
||||
})
|
||||
}
|
||||
346
cmd/cluster.go
346
cmd/cluster.go
@@ -1,127 +1,343 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"os"
|
||||
"text/template"
|
||||
|
||||
"github.com/rancher/cli/cliclient"
|
||||
managementClient "github.com/rancher/types/client/management/v3"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
const (
|
||||
importDescription = `
|
||||
Imports an existing cluster to be used in rancher either by providing the location
|
||||
to .kube/config or using a generated kubectl command to run in your cluster.
|
||||
`
|
||||
dockerCommandTemplate = "docker run -d --restart=unless-stopped " +
|
||||
"-v /var/run/docker.sock:/var/run/docker.sock --net=host " +
|
||||
"{{.Image}} {{range .RoleFlags}}{{.}}{{end}}--server {{.URL}} " +
|
||||
"--token {{.Token}} --ca-checksum {{.Checksum}}\n"
|
||||
)
|
||||
|
||||
type dockerCommand struct {
|
||||
Checksum string
|
||||
Image string
|
||||
RoleFlags []string
|
||||
Token string
|
||||
URL string
|
||||
}
|
||||
|
||||
type ClusterData struct {
|
||||
Cluster managementClient.Cluster
|
||||
}
|
||||
|
||||
func ClusterCommand() cli.Command {
|
||||
clusterLsFlags := []cli.Flag{
|
||||
listAllFlag(),
|
||||
cli.BoolFlag{
|
||||
Name: "quiet,q",
|
||||
Usage: "Only display IDs",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "'json' or Custom format: '{{.ID}} {{.Environment.Name}}'",
|
||||
},
|
||||
clusterFileFlag := cli.StringFlag{
|
||||
Name: "file, f",
|
||||
Usage: "Location of file to load",
|
||||
}
|
||||
|
||||
return cli.Command{
|
||||
Name: "cluster",
|
||||
Usage: "Interact with cluster",
|
||||
Action: defaultAction(clusterLs),
|
||||
Flags: clusterLsFlags,
|
||||
Name: "clusters",
|
||||
Aliases: []string{"cluster"},
|
||||
Usage: "Operations on clusters",
|
||||
Action: defaultAction(clusterLs),
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "ls",
|
||||
Usage: "List clusters",
|
||||
Description: "\nList all clusters in the current rancher setup\n",
|
||||
Description: "\nLists all clusters in the current cluster.",
|
||||
ArgsUsage: "None",
|
||||
Action: clusterLs,
|
||||
Flags: clusterLsFlags,
|
||||
},
|
||||
// FIXME add this back in along with the required flags
|
||||
//{
|
||||
// Name: "create",
|
||||
// Usage: "create `NAME`",
|
||||
// Description: "\nCreates a cluster on the server",
|
||||
// ArgsUsage: "[NEWCLUSTERNAME...]",
|
||||
// Action: clusterCreate,
|
||||
// Flags: []cli.Flag{
|
||||
// clusterFileFlag,
|
||||
// cli.StringFlag{
|
||||
// Name: "type",
|
||||
// Usage: "type of cluster to create",
|
||||
// },
|
||||
// },
|
||||
//},
|
||||
{
|
||||
Name: "import",
|
||||
Usage: "Import an existing cluster",
|
||||
Description: importDescription,
|
||||
ArgsUsage: "[NEWCLUSTERNAME...]",
|
||||
Action: clusterImport,
|
||||
Flags: []cli.Flag{
|
||||
clusterFileFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "create",
|
||||
Usage: "create cluster",
|
||||
Description: "\nCreate cluster\n",
|
||||
ArgsUsage: "None",
|
||||
Action: clusterCreate,
|
||||
},
|
||||
{
|
||||
Name: "rm",
|
||||
Usage: "remove cluster",
|
||||
Description: "\nRemove cluster\n",
|
||||
ArgsUsage: "None",
|
||||
Action: clusterRemove,
|
||||
},
|
||||
{
|
||||
Name: "export",
|
||||
Usage: "export an external cluster",
|
||||
Description: "\nExport an external cluster inside the current cluster",
|
||||
ArgsUsage: "None",
|
||||
Action: clusterExport,
|
||||
Name: "get-command",
|
||||
Usage: "Returns the command needed to add a node to an existing cluster",
|
||||
ArgsUsage: "[CLUSTERNAME]",
|
||||
Action: getDockerCommand,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "label",
|
||||
Usage: "Labels to apply to a node",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "etcd",
|
||||
Usage: "Use node for etcd",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "management",
|
||||
Usage: "Use node for management",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "worker",
|
||||
Usage: "Use node as a worker",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func clusterLs(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
collection, err := c.ManagementClient.Cluster.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writer := NewTableWriter([][]string{
|
||||
{"ID", "Cluster.Id"},
|
||||
{"ID", "Cluster.ID"},
|
||||
{"NAME", "Cluster.Name"},
|
||||
{"STATE", "Cluster.State"},
|
||||
{"CREATED", "Cluster.Created"},
|
||||
{"EMBEDDED", "Cluster.Embedded"},
|
||||
}, ctx)
|
||||
|
||||
defer writer.Close()
|
||||
|
||||
clusters, err := c.Cluster.List(&client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"removed_null": "true",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, cluster := range clusters.Data {
|
||||
writer.Write(ClusterData{cluster})
|
||||
for _, item := range collection.Data {
|
||||
writer.Write(&ClusterData{
|
||||
Cluster: item,
|
||||
})
|
||||
}
|
||||
|
||||
return writer.Err()
|
||||
}
|
||||
|
||||
func clusterCreate(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
if err != nil {
|
||||
c, err := GetClient(ctx)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
name := RandomName()
|
||||
|
||||
var name string
|
||||
|
||||
if ctx.NArg() > 0 {
|
||||
name = ctx.Args()[0]
|
||||
}
|
||||
cluster, err := c.Cluster.Create(&client.Cluster{
|
||||
|
||||
_, err = c.ManagementClient.Cluster.Create(&managementClient.Cluster{
|
||||
Name: name,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
fmt.Println(cluster.Id)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func clusterRemove(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
func clusterImport(ctx *cli.Context) error {
|
||||
if ctx.NArg() == 0 {
|
||||
return errors.New("name is required")
|
||||
}
|
||||
|
||||
if ctx.String("file") != "" {
|
||||
err := clusterFromKubeconfig(ctx)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err := clusterFromCommand(ctx)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getDockerCommand prints the command needed to add a node to a cluster
|
||||
func getDockerCommand(ctx *cli.Context) error {
|
||||
var clusterName string
|
||||
|
||||
if ctx.NArg() == 0 {
|
||||
return errors.New("cluster name is required")
|
||||
}
|
||||
|
||||
clusterName = ctx.Args().First()
|
||||
|
||||
c, err := GetClient(ctx)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
settingsMap, err := settingsToMap(c)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := defaultListOpts(ctx)
|
||||
opts.Filters["name"] = clusterName
|
||||
|
||||
clusterCollection, err := c.ManagementClient.Cluster.List(opts)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(clusterCollection.Data) == 0 {
|
||||
return fmt.Errorf("no cluster found with the name [%s], run "+
|
||||
"`rancher clusters` to see available clusters", clusterName)
|
||||
}
|
||||
|
||||
clusterToken, err := getClusterRegToken(ctx, c, clusterCollection.Data[0].ID)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
var roleFlags []string
|
||||
|
||||
if ctx.Bool("etcd") {
|
||||
roleFlags = append(roleFlags, "--etcd ")
|
||||
}
|
||||
|
||||
if ctx.Bool("management") {
|
||||
roleFlags = append(roleFlags, "--controlplane ")
|
||||
}
|
||||
|
||||
if ctx.Bool("worker") {
|
||||
roleFlags = append(roleFlags, "--worker ")
|
||||
}
|
||||
|
||||
dockerString, err := template.New("docker").Parse(dockerCommandTemplate)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
dockerInfo := dockerCommand{
|
||||
Checksum: checkSum(settingsMap["cacerts"] + "\n"),
|
||||
Image: settingsMap["agent-image"],
|
||||
RoleFlags: roleFlags,
|
||||
Token: clusterToken.Token,
|
||||
URL: c.UserConfig.URL,
|
||||
}
|
||||
|
||||
fmt.Println("Run this command on an existing machine already running a " +
|
||||
"supported version of Docker:")
|
||||
|
||||
dockerString.Execute(os.Stdout, dockerInfo)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// clusterFromKubeconfig reads in a JSON or yaml of the kubeconfig and uses
|
||||
// that to pull in the cluster
|
||||
func clusterFromKubeconfig(ctx *cli.Context) error {
|
||||
blob, err := readFileReturnJSON(ctx.String("file"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return forEachResourceWithClient(c, ctx, []string{"cluster"}, func(c *client.RancherClient, resource *client.Resource) (string, error) {
|
||||
return resource.Id, c.Delete(resource)
|
||||
c, err := GetClient(ctx)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
cluster, err := c.ManagementClient.Cluster.Create(&managementClient.Cluster{
|
||||
Name: ctx.Args().First(),
|
||||
ImportedConfig: &managementClient.ImportedConfig{KubeConfig: string(blob)},
|
||||
})
|
||||
}
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
func clusterExport(ctx *cli.Context) error {
|
||||
fmt.Println("Support coming soon")
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"Name": cluster.Name,
|
||||
"ID": cluster.ID,
|
||||
}).Info("Cluster created:")
|
||||
return nil
|
||||
}
|
||||
|
||||
type ClusterData struct {
|
||||
Cluster client.Cluster
|
||||
// clusterFromCommand creates a holder cluster and provides the command to run
|
||||
// in the cluster to register with Rancher
|
||||
func clusterFromCommand(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
// create a holder cluster so we can get the ClusterRegistrationToken
|
||||
cluster, err := c.ManagementClient.Cluster.Create(&managementClient.Cluster{
|
||||
Name: ctx.Args().First(),
|
||||
RancherKubernetesEngineConfig: &managementClient.RancherKubernetesEngineConfig{
|
||||
Nodes: make([]managementClient.RKEConfigNode, 1),
|
||||
},
|
||||
})
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
token, err := getClusterRegToken(ctx, c, cluster.ID)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
//FIXME probably need more info here
|
||||
logrus.Printf("Run the following command in your cluster: %v", token.Command)
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkSum(s string) string {
|
||||
sum := sha256.Sum256([]byte(s))
|
||||
return fmt.Sprintf("%x", sum)
|
||||
}
|
||||
|
||||
// getClusterRegToken will return an existing token or create one if none exist
|
||||
func getClusterRegToken(
|
||||
ctx *cli.Context,
|
||||
c *cliclient.MasterClient,
|
||||
clusterID string,
|
||||
) (managementClient.ClusterRegistrationToken, error) {
|
||||
tokenOpts := defaultListOpts(ctx)
|
||||
tokenOpts.Filters["clusterId"] = clusterID
|
||||
|
||||
clusterTokenCollection, err := c.ManagementClient.ClusterRegistrationToken.List(tokenOpts)
|
||||
if nil != err {
|
||||
return managementClient.ClusterRegistrationToken{}, err
|
||||
}
|
||||
|
||||
if len(clusterTokenCollection.Data) == 0 {
|
||||
crt := &managementClient.ClusterRegistrationToken{
|
||||
ClusterId: clusterID,
|
||||
}
|
||||
clusterToken, err := c.ManagementClient.ClusterRegistrationToken.Create(crt)
|
||||
if nil != err {
|
||||
return managementClient.ClusterRegistrationToken{}, err
|
||||
}
|
||||
return *clusterToken, nil
|
||||
}
|
||||
return clusterTokenCollection.Data[0], nil
|
||||
}
|
||||
|
||||
421
cmd/common.go
421
cmd/common.go
@@ -2,8 +2,11 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
@@ -11,354 +14,94 @@ import (
|
||||
"syscall"
|
||||
"text/template"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/rancher/cli/cliclient"
|
||||
"github.com/rancher/cli/config"
|
||||
|
||||
"github.com/docker/docker/pkg/namesgenerator"
|
||||
"github.com/fatih/color"
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoEnv = errors.New("Failed to find the current environment")
|
||||
errNoURL = errors.New("RANCHER_URL environment or --url is not set, run `config`")
|
||||
errNoURL = errors.New("RANCHER_URL environment or --Url is not set, run `login`")
|
||||
colors = []color.Attribute{color.FgGreen, color.FgBlue, color.FgCyan, color.FgMagenta, color.FgRed, color.FgWhite, color.FgYellow}
|
||||
)
|
||||
|
||||
func GetRawClient(ctx *cli.Context) (*client.RancherClient, error) {
|
||||
config, err := lookupConfig(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func loadAndVerifyCert(path string) (string, error) {
|
||||
caCert, err := ioutil.ReadFile(path)
|
||||
if nil != err {
|
||||
return "", err
|
||||
}
|
||||
url, err := baseURL(config.URL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
block, _ := pem.Decode(caCert)
|
||||
|
||||
parsedCert, err := x509.ParseCertificate(block.Bytes)
|
||||
if !parsedCert.IsCA {
|
||||
return "", errors.New("CACerts is not valid")
|
||||
}
|
||||
return client.NewRancherClient(&client.ClientOpts{
|
||||
Url: url + "/v3",
|
||||
AccessKey: config.AccessKey,
|
||||
SecretKey: config.SecretKey,
|
||||
})
|
||||
return string(caCert), nil
|
||||
}
|
||||
|
||||
func lookupConfig(ctx *cli.Context) (Config, error) {
|
||||
func loadConfig(path string) (config.Config, error) {
|
||||
cf := config.Config{
|
||||
Path: path,
|
||||
Servers: make(map[string]*config.ServerConfig),
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadFile(path)
|
||||
if os.IsNotExist(err) {
|
||||
return cf, nil
|
||||
} else if nil != err {
|
||||
return cf, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(content, &cf)
|
||||
cf.Path = path
|
||||
|
||||
return cf, err
|
||||
}
|
||||
|
||||
func lookupConfig(ctx *cli.Context) (*config.ServerConfig, error) {
|
||||
path := ctx.GlobalString("config")
|
||||
if path == "" {
|
||||
path = os.ExpandEnv("${HOME}/.rancher/cli.json")
|
||||
}
|
||||
|
||||
config, err := LoadConfig(path)
|
||||
if err != nil {
|
||||
return config, err
|
||||
}
|
||||
|
||||
url := ctx.GlobalString("url")
|
||||
accessKey := ctx.GlobalString("access-key")
|
||||
secretKey := ctx.GlobalString("secret-key")
|
||||
envName := ctx.GlobalString("environment")
|
||||
|
||||
if url != "" {
|
||||
config.URL = url
|
||||
}
|
||||
if accessKey != "" {
|
||||
config.AccessKey = accessKey
|
||||
}
|
||||
if secretKey != "" {
|
||||
config.SecretKey = secretKey
|
||||
}
|
||||
if envName != "" {
|
||||
config.Environment = envName
|
||||
}
|
||||
|
||||
if config.URL == "" {
|
||||
return config, errNoURL
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func GetClient(ctx *cli.Context) (*client.RancherClient, error) {
|
||||
config, err := lookupConfig(ctx)
|
||||
if err != nil {
|
||||
cf, err := loadConfig(path)
|
||||
if nil != err {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url, err := config.EnvironmentURL()
|
||||
if err != nil {
|
||||
cs := cf.FocusedServer()
|
||||
if cs == nil {
|
||||
return nil, errors.New("no configuration found, run `login`")
|
||||
}
|
||||
|
||||
return cs, nil
|
||||
}
|
||||
|
||||
func GetClient(ctx *cli.Context) (*cliclient.MasterClient, error) {
|
||||
cf, err := lookupConfig(ctx)
|
||||
if nil != err {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client.NewRancherClient(&client.ClientOpts{
|
||||
Url: url + "/schemas",
|
||||
AccessKey: config.AccessKey,
|
||||
SecretKey: config.SecretKey,
|
||||
})
|
||||
}
|
||||
|
||||
func GetEnvironment(def string, c *client.RancherClient) (*client.Project, error) {
|
||||
resp, err := c.Project.List(&client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"all": true,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
mc, err := cliclient.NewMasterClient(cf)
|
||||
if nil != err {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(resp.Data) == 0 {
|
||||
return nil, errNoEnv
|
||||
}
|
||||
|
||||
if len(resp.Data) == 1 {
|
||||
return &resp.Data[0], nil
|
||||
}
|
||||
|
||||
if def == "" {
|
||||
names := []string{}
|
||||
for _, p := range resp.Data {
|
||||
cluster, err := c.Cluster.ById(p.ClusterId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names = append(names, fmt.Sprintf("%s(%s), cluster Name: %s (%s)", p.Name, p.Id, cluster.Name, cluster.Id))
|
||||
}
|
||||
|
||||
idx := selectFromList("Environments:", names)
|
||||
return &resp.Data[idx], nil
|
||||
}
|
||||
|
||||
return LookupEnvironment(c, def)
|
||||
}
|
||||
|
||||
func LookupEnvironment(c *client.RancherClient, name string) (*client.Project, error) {
|
||||
env, err := Lookup(c, name, "account")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if env.Type != "project" {
|
||||
return nil, fmt.Errorf("Failed to find environment: %s", name)
|
||||
}
|
||||
return c.Project.ById(env.Id)
|
||||
}
|
||||
|
||||
func GetOrCreateDefaultStack(c *client.RancherClient, name string) (*client.Stack, error) {
|
||||
if name == "" {
|
||||
name = "Default"
|
||||
}
|
||||
|
||||
resp, err := c.Stack.List(&client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"name": name,
|
||||
"removed_null": 1,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(resp.Data) > 0 {
|
||||
return &resp.Data[0], nil
|
||||
}
|
||||
|
||||
return c.Stack.Create(&client.Stack{
|
||||
Name: name,
|
||||
})
|
||||
}
|
||||
|
||||
func getHostByHostname(c *client.RancherClient, name string) (client.ResourceCollection, error) {
|
||||
var result client.ResourceCollection
|
||||
allHosts, err := c.Host.List(nil)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
for _, host := range allHosts.Data {
|
||||
if host.Hostname == name {
|
||||
result.Data = append(result.Data, host.Resource)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
return mc, nil
|
||||
}
|
||||
|
||||
func RandomName() string {
|
||||
return strings.Replace(namesgenerator.GetRandomName(0), "_", "-", -1)
|
||||
}
|
||||
|
||||
func getContainerByName(c *client.RancherClient, name string) (client.ResourceCollection, error) {
|
||||
var result client.ResourceCollection
|
||||
stack, containerName, err := ParseName(c, name)
|
||||
containers, err := c.Container.List(&client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"stackId": stack.Id,
|
||||
"name": containerName,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
for _, container := range containers.Data {
|
||||
result.Data = append(result.Data, container.Resource)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func getProjectByname(c *client.RancherClient, name string) (client.ResourceCollection, error) {
|
||||
var result client.ResourceCollection
|
||||
clusterName, projectName := parseClusterAndProject(name)
|
||||
if clusterName != "" {
|
||||
clusters, err := c.Cluster.List(&client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"name": clusterName,
|
||||
"removed_null": "true",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
if len(clusters.Data) == 0 {
|
||||
return result, errors.Errorf("failed to find cluster with name %s", clusterName)
|
||||
}
|
||||
projects, err := c.Project.List(&client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"clusterId": clusters.Data[0].Id,
|
||||
"name": projectName,
|
||||
"removed_null": "true",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
for _, project := range projects.Data {
|
||||
result.Data = append(result.Data, project.Resource)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
projects, err := c.Project.List(&client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"name": projectName,
|
||||
"removed_null": "true",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
for _, project := range projects.Data {
|
||||
result.Data = append(result.Data, project.Resource)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func getServiceByName(c *client.RancherClient, name string) (client.ResourceCollection, error) {
|
||||
var result client.ResourceCollection
|
||||
stack, serviceName, err := ParseName(c, name)
|
||||
|
||||
services, err := c.Service.List(&client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"stackId": stack.Id,
|
||||
"name": serviceName,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
for _, service := range services.Data {
|
||||
result.Data = append(result.Data, service.Resource)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func Lookup(c *client.RancherClient, name string, types ...string) (*client.Resource, error) {
|
||||
var byName *client.Resource
|
||||
|
||||
for _, schemaType := range types {
|
||||
var resource client.Resource
|
||||
// this is a hack for projects, it returns 403
|
||||
if !strings.Contains(name, "/") {
|
||||
if err := c.ById(schemaType, name, &resource); !client.IsNotFound(err) && err != nil {
|
||||
return nil, err
|
||||
} else if err == nil && resource.Id == name { // The ID check is because of an oddity in the id obfuscation
|
||||
return &resource, nil
|
||||
}
|
||||
}
|
||||
|
||||
var collection client.ResourceCollection
|
||||
if err := c.List(schemaType, &client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"name": name,
|
||||
"removed_null": "1",
|
||||
},
|
||||
}, &collection); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(collection.Data) > 1 {
|
||||
ids := []string{}
|
||||
for _, data := range collection.Data {
|
||||
switch schemaType {
|
||||
case "project":
|
||||
project, err := c.Project.ById(data.Id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cluster, err := c.Cluster.ById(project.ClusterId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ids = append(ids, fmt.Sprintf("cluster %s, %s (%s)", cluster.Name, data.Id, name))
|
||||
case "container":
|
||||
container, err := c.Container.ById(data.Id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
host, err := c.Host.ById(container.HostId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ids = append(ids, fmt.Sprintf("host %s, %s (%s)", host.Hostname, data.Id, name))
|
||||
default:
|
||||
ids = append(ids, fmt.Sprintf("%s (%s)", data.Id, name))
|
||||
}
|
||||
|
||||
}
|
||||
index := selectFromList("Resources: ", ids)
|
||||
return &collection.Data[index], nil
|
||||
}
|
||||
|
||||
if len(collection.Data) == 0 {
|
||||
var err error
|
||||
// Per type specific logic
|
||||
switch schemaType {
|
||||
case "host":
|
||||
collection, err = getHostByHostname(c, name)
|
||||
case "service":
|
||||
collection, err = getServiceByName(c, name)
|
||||
case "container":
|
||||
collection, err = getContainerByName(c, name)
|
||||
case "project":
|
||||
collection, err = getProjectByname(c, name)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(collection.Data) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
byName = &collection.Data[0]
|
||||
}
|
||||
|
||||
if byName == nil {
|
||||
return nil, fmt.Errorf("Not found: %s", name)
|
||||
}
|
||||
|
||||
return byName, nil
|
||||
}
|
||||
|
||||
func appendTabDelim(buf *bytes.Buffer, value string) {
|
||||
if buf.Len() == 0 {
|
||||
buf.WriteString(value)
|
||||
@@ -403,7 +146,7 @@ func printTemplate(out io.Writer, templateContent string, obj interface{}) error
|
||||
"json": FormatJSON,
|
||||
}
|
||||
tmpl, err := template.New("").Funcs(funcMap).Parse(templateContent)
|
||||
if err != nil {
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -426,3 +169,49 @@ func getRandomColor() color.Attribute {
|
||||
index := r1.Intn(8)
|
||||
return colors[index]
|
||||
}
|
||||
|
||||
// SplitOnColon breaks s apart at every ":" separator.
func SplitOnColon(s string) []string {
	parts := strings.Split(s, ":")
	return parts
}
|
||||
|
||||
// parseClusterAndProject splits "cluster/project" notation into its two
// parts. A name without "/" is returned as ("", name).
func parseClusterAndProject(name string) (string, string) {
	if i := strings.Index(name, "/"); i >= 0 {
		return name[:i], name[i+1:]
	}
	return "", name
}
|
||||
|
||||
// Return a JSON blob of the file at path
|
||||
func readFileReturnJSON(path string) ([]byte, error) {
|
||||
file, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
// This is probably already JSON if true
|
||||
if hasPrefix(file, []byte("{")) {
|
||||
return file, nil
|
||||
}
|
||||
return yaml.YAMLToJSON(file)
|
||||
}
|
||||
|
||||
// hasPrefix reports whether the first non-whitespace bytes of buf start
// with prefix.
func hasPrefix(buf []byte, prefix []byte) bool {
	return bytes.HasPrefix(bytes.TrimLeftFunc(buf, unicode.IsSpace), prefix)
}
|
||||
|
||||
func settingsToMap(client *cliclient.MasterClient) (map[string]string, error) {
|
||||
configMap := make(map[string]string)
|
||||
|
||||
settings, err := client.ManagementClient.Setting.List(baseListOpts())
|
||||
if nil != err {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, setting := range settings.Data {
|
||||
configMap[setting.Name] = setting.Value
|
||||
}
|
||||
|
||||
return configMap, nil
|
||||
}
|
||||
|
||||
200
cmd/config.go
200
cmd/config.go
@@ -1,200 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// Config holds the CLI connection settings persisted in cli.json.
type Config struct {
	AccessKey   string `json:"accessKey"`      // API key ID used to authenticate
	SecretKey   string `json:"secretKey"`      // API key secret
	URL         string `json:"url"`            // Rancher server URL
	Environment string `json:"environment"`    // default environment (project) ID or name
	Path        string `json:"path,omitempty"` // on-disk location; omitted from the file when empty
}
|
||||
|
||||
// baseURL strips fullURL down to its server root: everything before the
// last "/v2-beta" segment when present, otherwise just scheme://host.
func baseURL(fullURL string) (string, error) {
	if idx := strings.LastIndex(fullURL, "/v2-beta"); idx != -1 {
		return fullURL[:idx], nil
	}

	u, err := url.Parse(fullURL)
	if err != nil {
		return "", err
	}
	stripped := url.URL{
		Scheme: u.Scheme,
		Host:   u.Host,
	}
	return stripped.String(), nil
}
|
||||
|
||||
func (c Config) EnvironmentURL() (string, error) {
|
||||
projectID := c.Environment
|
||||
if projectID == "" || !strings.HasPrefix(projectID, "1a") {
|
||||
rancherClient, err := client.NewRancherClient(&client.ClientOpts{
|
||||
Url: c.URL,
|
||||
AccessKey: c.AccessKey,
|
||||
SecretKey: c.SecretKey,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
project, err := GetEnvironment(c.Environment, rancherClient)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
projectID = project.Id
|
||||
}
|
||||
|
||||
url, err := baseURL(c.URL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
url = url + "/v2-beta/projects/" + projectID + "/schemas"
|
||||
return url, nil
|
||||
}
|
||||
|
||||
func (c Config) Write() error {
|
||||
err := os.MkdirAll(path.Dir(c.Path), 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("Saving config to %s", c.Path)
|
||||
p := c.Path
|
||||
c.Path = ""
|
||||
output, err := os.Create(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer output.Close()
|
||||
|
||||
return json.NewEncoder(output).Encode(c)
|
||||
}
|
||||
|
||||
func LoadConfig(path string) (Config, error) {
|
||||
config := Config{
|
||||
Path: path,
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadFile(path)
|
||||
if os.IsNotExist(err) {
|
||||
return config, nil
|
||||
} else if err != nil {
|
||||
return config, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(content, &config)
|
||||
config.Path = path
|
||||
|
||||
return config, err
|
||||
}
|
||||
|
||||
func ConfigCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "config",
|
||||
Usage: "Setup client configuration",
|
||||
Action: configSetup,
|
||||
ArgsUsage: "None",
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "print,p",
|
||||
Usage: "Print the current configuration",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getConfig(reader *bufio.Reader, text, def string) (string, error) {
|
||||
for {
|
||||
fmt.Printf("%s [%s]: ", text, def)
|
||||
input, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
input = strings.TrimSpace(input)
|
||||
|
||||
if input != "" {
|
||||
return input, nil
|
||||
}
|
||||
|
||||
if input == "" && def != "" {
|
||||
return def, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func configSetup(ctx *cli.Context) error {
|
||||
config, err := lookupConfig(ctx)
|
||||
if err != nil && err != errNoURL {
|
||||
return err
|
||||
}
|
||||
|
||||
if ctx.Bool("print") {
|
||||
return json.NewEncoder(os.Stdout).Encode(config)
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
config.URL, err = getConfig(reader, "URL", config.URL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config.AccessKey, err = getConfig(reader, "Access Key", config.AccessKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config.SecretKey, err = getConfig(reader, "Secret Key", config.SecretKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := client.NewRancherClient(&client.ClientOpts{
|
||||
Url: config.URL,
|
||||
AccessKey: config.AccessKey,
|
||||
SecretKey: config.SecretKey,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if schema, ok := c.GetSchemas().CheckSchema("schema"); ok {
|
||||
// Normalize URL
|
||||
config.URL = schema.Links["collection"]
|
||||
} else {
|
||||
return fmt.Errorf("Failed to find schema URL")
|
||||
}
|
||||
|
||||
c, err = client.NewRancherClient(&client.ClientOpts{
|
||||
Url: config.URL,
|
||||
AccessKey: config.AccessKey,
|
||||
SecretKey: config.SecretKey,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := GetEnvironment("", c)
|
||||
if err != errNoEnv {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Environment = project.Id
|
||||
}
|
||||
|
||||
return config.Write()
|
||||
}
|
||||
36
cmd/delete.go
Normal file
36
cmd/delete.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func DeleteCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "delete",
|
||||
Aliases: []string{"rm"},
|
||||
Usage: "Delete resources by ID",
|
||||
Action: deleteResource,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "type",
|
||||
Usage: "type of resource to delete",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func deleteResource(ctx *cli.Context) error {
|
||||
if ctx.String("type") == "" {
|
||||
return errors.New("type is required for deletes")
|
||||
}
|
||||
//c, err := GetClient(ctx)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
fmt.Println("This isn't implemented yet")
|
||||
|
||||
return nil
|
||||
}
|
||||
146
cmd/docker.go
146
cmd/docker.go
@@ -1,146 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/rancher/rancher-docker-api-proxy"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func DockerCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "docker",
|
||||
Usage: "Run docker CLI on a host",
|
||||
Description: "\nUses the $RANCHER_DOCKER_HOST to run docker commands. Use `--host <hostID>` or `--host <hostName>` to select a different host.\n\nExample:\n\t$ rancher --host 1h1 docker ps\n",
|
||||
Action: hostDocker,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "help-docker",
|
||||
Usage: "Display the 'docker --help'",
|
||||
},
|
||||
},
|
||||
SkipFlagParsing: true,
|
||||
}
|
||||
}
|
||||
|
||||
func hostDocker(ctx *cli.Context) error {
|
||||
return processExitCode(doDocker(ctx))
|
||||
}
|
||||
|
||||
func doDocker(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) > 0 && (args[0] == "-h" || args[0] == "--help") {
|
||||
return cli.ShowCommandHelp(ctx, "docker")
|
||||
}
|
||||
|
||||
if len(args) > 0 && args[0] == "--help-docker" {
|
||||
return runDockerHelp("")
|
||||
}
|
||||
|
||||
hostname := ctx.GlobalString("host")
|
||||
if hostname == "" {
|
||||
return fmt.Errorf("--host is required")
|
||||
}
|
||||
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return runDocker(hostname, c, args)
|
||||
}
|
||||
|
||||
func runDockerCommand(hostname string, c *client.RancherClient, command string, args []string) error {
|
||||
return runDocker(hostname, c, append([]string{command}, args...))
|
||||
}
|
||||
|
||||
func runDocker(hostname string, c *client.RancherClient, args []string) error {
|
||||
return runDockerWithOutput(hostname, c, args, os.Stdout, os.Stderr)
|
||||
}
|
||||
|
||||
func determineAPIVersion(host *client.Host) string {
|
||||
version := host.Labels["io.rancher.host.docker_version"]
|
||||
parts := strings.Split(fmt.Sprint(version), ".")
|
||||
if len(parts) != 2 {
|
||||
return ""
|
||||
}
|
||||
num, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return fmt.Sprintf("1.%d", num+12)
|
||||
}
|
||||
|
||||
func runDockerWithOutput(hostname string, c *client.RancherClient, args []string,
|
||||
out, outErr io.Writer) error {
|
||||
resource, err := Lookup(c, hostname, "host")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host, err := c.Host.ById(resource.Id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state := getHostState(host)
|
||||
if state != "active" && state != "inactive" && state != "disconnected" {
|
||||
return fmt.Errorf("Can not contact host %s in state %s", hostname, state)
|
||||
}
|
||||
|
||||
apiVersion := determineAPIVersion(host)
|
||||
|
||||
tempfile, err := ioutil.TempFile("", "docker-sock")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tempfile.Name())
|
||||
|
||||
if err := tempfile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dockerHost := "unix://" + tempfile.Name()
|
||||
proxy := dockerapiproxy.NewProxy(c, host.Id, dockerHost)
|
||||
if err := proxy.Listen(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
logrus.Fatal(proxy.Serve())
|
||||
}()
|
||||
|
||||
var cmd *exec.Cmd
|
||||
if len(args) > 0 && args[0] == "--" {
|
||||
if len(args) > 1 {
|
||||
cmd = exec.Command(args[1], args[2:]...)
|
||||
} else {
|
||||
cmd = exec.Command(os.Getenv("SHELL"))
|
||||
}
|
||||
cmd.Env = append(os.Environ(), "debian_chroot=docker:"+hostname)
|
||||
} else {
|
||||
cmd = exec.Command("docker", args...)
|
||||
cmd.Env = os.Environ()
|
||||
}
|
||||
|
||||
cmd.Env = append(cmd.Env, "DOCKER_HOST="+dockerHost)
|
||||
if apiVersion != "" {
|
||||
cmd.Env = append(cmd.Env, "DOCKER_API_VERSION="+apiVersion)
|
||||
}
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = out
|
||||
cmd.Stderr = outErr
|
||||
|
||||
signal.Ignore(os.Interrupt)
|
||||
return cmd.Run()
|
||||
}
|
||||
309
cmd/env.go
309
cmd/env.go
@@ -1,309 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func EnvCommand() cli.Command {
|
||||
envLsFlags := []cli.Flag{
|
||||
listAllFlag(),
|
||||
cli.BoolFlag{
|
||||
Name: "quiet,q",
|
||||
Usage: "Only display IDs",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "'json' or Custom format: '{{.ID}} {{.Environment.Name}}'",
|
||||
},
|
||||
}
|
||||
|
||||
return cli.Command{
|
||||
Name: "environment",
|
||||
ShortName: "env",
|
||||
Usage: "Interact with environments",
|
||||
Action: defaultAction(envLs),
|
||||
Flags: envLsFlags,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "ls",
|
||||
Usage: "List environments",
|
||||
Description: "\nWith an account API key, all environments in Rancher will be listed. If you are using an environment API key, it will only list the environment of the API key. \n\nExample:\n\t$ rancher env ls\n",
|
||||
ArgsUsage: "None",
|
||||
Action: envLs,
|
||||
Flags: envLsFlags,
|
||||
},
|
||||
{
|
||||
Name: "create",
|
||||
Usage: "Create an environment",
|
||||
Description: `
|
||||
Example:
|
||||
|
||||
$ rancher env create newEnv
|
||||
|
||||
`,
|
||||
ArgsUsage: "[NEWENVNAME...]",
|
||||
Action: envCreate,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "cluster,c",
|
||||
Usage: "Cluster name to create the environment",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "rm",
|
||||
Usage: "Remove environment(s)",
|
||||
Description: "\nExample:\n\t$ rancher env rm 1a5\n\t$ rancher env rm newEnv\n",
|
||||
ArgsUsage: "[ENVID ENVNAME...]",
|
||||
Action: envRm,
|
||||
Flags: []cli.Flag{},
|
||||
},
|
||||
{
|
||||
Name: "deactivate",
|
||||
Usage: "Deactivate environment(s)",
|
||||
Description: `
|
||||
Deactivate an environment by ID or name
|
||||
|
||||
Example:
|
||||
$ rancher env deactivate 1a5
|
||||
$ rancher env deactivate Default
|
||||
`,
|
||||
ArgsUsage: "[ID NAME...]",
|
||||
Action: envDeactivate,
|
||||
Flags: []cli.Flag{},
|
||||
},
|
||||
{
|
||||
Name: "activate",
|
||||
Usage: "Activate environment(s)",
|
||||
Description: `
|
||||
Activate an environment by ID or name
|
||||
|
||||
Example:
|
||||
$ rancher env activate 1a5
|
||||
$ rancher env activate Default
|
||||
`,
|
||||
ArgsUsage: "[ID NAME...]",
|
||||
Action: envActivate,
|
||||
Flags: []cli.Flag{},
|
||||
},
|
||||
{
|
||||
Name: "switch",
|
||||
Usage: "Switch environment(s)",
|
||||
Description: `
|
||||
Switch current environment to others,
|
||||
|
||||
Example:
|
||||
$ rancher env switch 1a5
|
||||
$ rancher env switch Default
|
||||
`,
|
||||
ArgsUsage: "[ID NAME...]",
|
||||
Action: envSwitch,
|
||||
Flags: []cli.Flag{},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type EnvData struct {
|
||||
ID string
|
||||
Environment *client.Project
|
||||
Current string
|
||||
Name string
|
||||
}
|
||||
|
||||
func NewEnvData(project client.Project, current bool, name string) *EnvData {
|
||||
marked := ""
|
||||
if current {
|
||||
marked = " *"
|
||||
}
|
||||
return &EnvData{
|
||||
ID: project.Id,
|
||||
Environment: &project,
|
||||
Current: marked,
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
|
||||
func envRm(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return forEachResourceWithClient(c, ctx, []string{"project"}, func(c *client.RancherClient, resource *client.Resource) (string, error) {
|
||||
return resource.Id, c.Delete(resource)
|
||||
})
|
||||
}
|
||||
|
||||
func envCreate(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := RandomName()
|
||||
if ctx.NArg() > 0 {
|
||||
name = ctx.Args()[0]
|
||||
}
|
||||
clusters, err := c.Cluster.List(&client.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"removed_null": true,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(clusters.Data) == 0 {
|
||||
return errors.New("there is no cluster in current setup")
|
||||
}
|
||||
clusterNames := []string{}
|
||||
selectedClusterID := ""
|
||||
if ctx.String("cluster") != "" {
|
||||
for _, cluster := range clusters.Data {
|
||||
if cluster.Name == ctx.String("cluster") {
|
||||
selectedClusterID = cluster.Id
|
||||
}
|
||||
}
|
||||
if selectedClusterID == "" {
|
||||
return errors.Errorf("failed to find cluster associated with the specified cluster name %v", ctx.String("cluster"))
|
||||
}
|
||||
} else {
|
||||
for _, cluster := range clusters.Data {
|
||||
clusterNames = append(clusterNames, fmt.Sprintf("%s (%s)", cluster.Name, cluster.Id))
|
||||
}
|
||||
index := selectFromList("Clusters: ", clusterNames)
|
||||
selectedClusterID = clusters.Data[index].Id
|
||||
}
|
||||
data := map[string]interface{}{
|
||||
"name": name,
|
||||
"clusterId": selectedClusterID,
|
||||
}
|
||||
|
||||
var newEnv client.Project
|
||||
if err := c.Create("project", data, &newEnv); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(newEnv.Id)
|
||||
return nil
|
||||
}
|
||||
|
||||
func envLs(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config, err := lookupConfig(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentEnvID := config.Environment
|
||||
|
||||
writer := NewTableWriter([][]string{
|
||||
{"ID", "ID"},
|
||||
{"CLUSTER/NAME", "Name"},
|
||||
{"STATE", "Environment.State"},
|
||||
{"CREATED", "Environment.Created"},
|
||||
{"CURRENT", "Current"},
|
||||
}, ctx)
|
||||
defer writer.Close()
|
||||
|
||||
listOpts := defaultListOpts(ctx)
|
||||
listOpts.Filters["all"] = true
|
||||
collection, err := c.Project.List(listOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, item := range collection.Data {
|
||||
current := false
|
||||
if item.Id == currentEnvID {
|
||||
current = true
|
||||
}
|
||||
clusterName := ""
|
||||
if item.ClusterId != "" {
|
||||
cluster, err := c.Cluster.ById(item.ClusterId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterName = cluster.Name
|
||||
}
|
||||
name := item.Name
|
||||
if clusterName != "" {
|
||||
name = fmt.Sprintf("%s/%s", clusterName, name)
|
||||
}
|
||||
writer.Write(NewEnvData(item, current, name))
|
||||
}
|
||||
|
||||
return writer.Err()
|
||||
}
|
||||
|
||||
func envDeactivate(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return forEachResourceWithClient(c, ctx, []string{"project"}, func(c *client.RancherClient, resource *client.Resource) (string, error) {
|
||||
action, err := pickAction(resource, "deactivate")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return resource.Id, c.Action(resource.Type, action, resource, nil, resource)
|
||||
})
|
||||
}
|
||||
|
||||
func envActivate(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return forEachResourceWithClient(c, ctx, []string{"project"}, func(c *client.RancherClient, resource *client.Resource) (string, error) {
|
||||
action, err := pickAction(resource, "activate")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return resource.Id, c.Action(resource.Type, action, resource, nil, resource)
|
||||
})
|
||||
}
|
||||
|
||||
func envSwitch(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ctx.NArg() == 0 {
|
||||
return cli.ShowCommandHelp(ctx, "env")
|
||||
}
|
||||
name := ctx.Args()[0]
|
||||
resource, err := Lookup(c, name, "project")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config, err := lookupConfig(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Environment = resource.Id
|
||||
err = config.Write()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return envLs(ctx)
|
||||
}
|
||||
|
||||
// parseClusterAndProject splits "cluster/project" notation into its two
// parts; names without "/" come back as ("", name).
func parseClusterAndProject(name string) (string, string) {
	cluster, project := "", name
	if parts := strings.SplitN(name, "/", 2); len(parts) == 2 {
		cluster, project = parts[0], parts[1]
	}
	return cluster, project
}
|
||||
100
cmd/events.go
100
cmd/events.go
@@ -1,100 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/rancher/cli/monitor"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func EventsCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "events",
|
||||
ShortName: "event",
|
||||
Usage: "Displays resource change events",
|
||||
Description: "\nOnly events that are actively occuring in Rancher are listed.\n",
|
||||
ArgsUsage: "None",
|
||||
Action: events,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "'json' or Custom format: '{{.Name}} {{.Data.resource.kind}}'",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "reconnect,r",
|
||||
Usage: "Reconnect on error",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getClientForSubscribe(ctx *cli.Context) (*client.RancherClient, error) {
|
||||
if ctx.Bool("all") {
|
||||
return GetRawClient(ctx)
|
||||
}
|
||||
return GetClient(ctx)
|
||||
}
|
||||
|
||||
func events(ctx *cli.Context) error {
|
||||
reconnect := ctx.Bool("reconnect")
|
||||
|
||||
for {
|
||||
c, err := getClientForSubscribe(ctx)
|
||||
if err != nil {
|
||||
if reconnect {
|
||||
logrus.Error(err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
m := monitor.New(c)
|
||||
sub := m.Subscribe()
|
||||
go func() {
|
||||
if ctx.Bool("reconnect") {
|
||||
for {
|
||||
if err := m.Start(); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
} else {
|
||||
logrus.Fatal(m.Start())
|
||||
}
|
||||
}()
|
||||
|
||||
format := ctx.String("format")
|
||||
for event := range sub.C {
|
||||
if format == "" {
|
||||
resource, _ := event.Data["resource"].(map[string]interface{})
|
||||
name, _ := resource["name"].(string)
|
||||
|
||||
if name == "ping" {
|
||||
continue
|
||||
}
|
||||
|
||||
healthState, _ := resource["healthState"].(string)
|
||||
state, _ := resource["state"].(string)
|
||||
|
||||
combined := healthState
|
||||
if state != "active" || combined == "" {
|
||||
combined = state
|
||||
}
|
||||
|
||||
message, _ := resource["transitioningMessage"].(string)
|
||||
fmt.Printf("%s %s %s [%s] %v\n", event.ResourceType, event.ResourceID, combined, name, message)
|
||||
} else {
|
||||
writer := NewTableWriter(nil, ctx)
|
||||
writer.Write(event)
|
||||
if err := writer.Err(); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
194
cmd/exec.go
194
cmd/exec.go
@@ -1,194 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func ExecCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "exec",
|
||||
Usage: "Run a command on a container",
|
||||
Description: "\nThe command will find the container on the host and use `docker exec` to access the container. Any options that `docker exec` uses can be passed as an option for `rancher exec`.\n\nExample:\n\t$ rancher exec -i -t 1i1\n",
|
||||
Action: execCommand,
|
||||
SkipFlagParsing: true,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "help-docker",
|
||||
Usage: "Display the 'docker exec --help'",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func execCommand(ctx *cli.Context) error {
|
||||
return processExitCode(execCommandInternal(ctx))
|
||||
}
|
||||
|
||||
func execCommandInternal(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) > 0 && (args[0] == "-h" || args[0] == "--help") {
|
||||
return cli.ShowCommandHelp(ctx, "exec")
|
||||
}
|
||||
|
||||
if len(args) > 0 && args[0] == "--help-docker" {
|
||||
return runDockerHelp("exec")
|
||||
}
|
||||
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args, hostID, _, err := selectContainer(c, ctx.Args())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// this is a massive hack. Need to fix the real issue
|
||||
args = append([]string{"-i"}, args...)
|
||||
return runDockerCommand(hostID, c, "exec", args)
|
||||
}
|
||||
|
||||
func isHelp(args []string) bool {
|
||||
for _, i := range args {
|
||||
if i == "--help" || i == "-h" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func selectContainer(c *client.RancherClient, args []string) ([]string, string, string, error) {
|
||||
newArgs := make([]string, len(args))
|
||||
copy(newArgs, args)
|
||||
|
||||
name := ""
|
||||
index := 0
|
||||
for i, val := range newArgs {
|
||||
if !strings.HasPrefix(val, "-") {
|
||||
name = val
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if name == "" {
|
||||
return nil, "", "", fmt.Errorf("Please specify container name as an argument")
|
||||
}
|
||||
|
||||
resource, err := Lookup(c, name, "container", "service")
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
if _, ok := resource.Links["host"]; ok {
|
||||
hostID, containerID, err := getHostnameAndContainerID(c, resource.Id)
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
newArgs[index] = containerID
|
||||
return newArgs, hostID, containerID, nil
|
||||
}
|
||||
|
||||
if _, ok := resource.Links["instances"]; ok {
|
||||
var instances client.ContainerCollection
|
||||
if err := c.GetLink(*resource, "instances", &instances); err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
hostID, containerID, err := getHostnameAndContainerIDFromList(c, instances)
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
newArgs[index] = containerID
|
||||
return newArgs, hostID, containerID, nil
|
||||
}
|
||||
|
||||
return nil, "", "", nil
|
||||
}
|
||||
|
||||
func getHostnameAndContainerIDFromList(c *client.RancherClient, containers client.ContainerCollection) (string, string, error) {
|
||||
if len(containers.Data) == 0 {
|
||||
return "", "", fmt.Errorf("Failed to find a container")
|
||||
}
|
||||
|
||||
if len(containers.Data) == 1 {
|
||||
return containers.Data[0].HostId, containers.Data[0].ExternalId, nil
|
||||
}
|
||||
|
||||
names := []string{}
|
||||
for _, container := range containers.Data {
|
||||
name := ""
|
||||
if container.Name == "" {
|
||||
name = container.Id
|
||||
} else {
|
||||
name = container.Name
|
||||
}
|
||||
names = append(names, fmt.Sprintf("%s (%s)", name, container.PrimaryIpAddress))
|
||||
}
|
||||
|
||||
index := selectFromList("Containers:", names)
|
||||
return containers.Data[index].HostId, containers.Data[index].ExternalId, nil
|
||||
}
|
||||
|
||||
func selectFromList(header string, choices []string) int {
|
||||
if header != "" {
|
||||
fmt.Println(header)
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
selected := -1
|
||||
for selected <= 0 || selected > len(choices) {
|
||||
for i, choice := range choices {
|
||||
fmt.Printf("[%d] %s\n", i+1, choice)
|
||||
}
|
||||
fmt.Print("Select: ")
|
||||
|
||||
text, _ := reader.ReadString('\n')
|
||||
text = strings.TrimSpace(text)
|
||||
num, err := strconv.Atoi(text)
|
||||
if err == nil {
|
||||
selected = num
|
||||
}
|
||||
}
|
||||
return selected - 1
|
||||
}
|
||||
|
||||
func getHostnameAndContainerID(c *client.RancherClient, containerID string) (string, string, error) {
|
||||
container, err := c.Container.ById(containerID)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
var host client.Host
|
||||
if err := c.GetLink(container.Resource, "host", &host); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if host.Id == "" {
|
||||
return "", "", fmt.Errorf("Failed to find host for container %s", container.Name)
|
||||
}
|
||||
|
||||
return host.Id, container.ExternalId, nil
|
||||
}
|
||||
|
||||
func runDockerHelp(subcommand string) error {
|
||||
args := []string{"--help"}
|
||||
if subcommand != "" {
|
||||
args = []string{subcommand, "--help"}
|
||||
}
|
||||
cmd := exec.Command("docker", args...)
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
167
cmd/export.go
167
cmd/export.go
@@ -1,167 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func ExportCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "export",
|
||||
Usage: "Export configuration yml for a stack as a tar archive or to local files",
|
||||
Description: `
|
||||
Exports the docker-compose.yml and rancher-compose.yml for the specified stack as a tar archive.
|
||||
|
||||
Example:
|
||||
$ rancher export mystack
|
||||
$ rancher export -f files.tar mystack
|
||||
# Export the entire environment, including system stacks
|
||||
$ rancher export --system mystack
|
||||
`,
|
||||
ArgsUsage: "[STACKNAME STACKID...]",
|
||||
Action: exportService,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "file,f",
|
||||
Usage: "Write to a file, instead of local files, use - to write to STDOUT",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "system,s",
|
||||
Usage: "If exporting the entire environment, include system",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getOutput(ctx *cli.Context) (io.WriteCloser, error) {
|
||||
output := ctx.String("file")
|
||||
if output == "" {
|
||||
return nil, nil
|
||||
} else if output == "-" {
|
||||
return os.Stdout, nil
|
||||
}
|
||||
return os.Create(output)
|
||||
}
|
||||
|
||||
func getStackNames(ctx *cli.Context, c *client.RancherClient) ([]string, error) {
|
||||
stacks, err := c.Stack.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := []string{}
|
||||
for _, stack := range stacks.Data {
|
||||
result = append(result, stack.Name)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func exportService(ctx *cli.Context) error {
|
||||
var err error
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
names := ctx.Args()
|
||||
if len(names) == 0 {
|
||||
names, err = getStackNames(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var archive *tar.Writer
|
||||
output, err := getOutput(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if output != nil {
|
||||
defer output.Close()
|
||||
archive = tar.NewWriter(output)
|
||||
defer archive.Close()
|
||||
}
|
||||
|
||||
for _, name := range names {
|
||||
resource, err := Lookup(c, name, "stack")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stack, err := c.Stack.ById(resource.Id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, ok := stack.Actions["exportconfig"]; !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
config, err := c.Stack.ActionExportconfig(stack, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := addToTar(archive, stack.Name, "compose.yml", config.Templates["compose.yml"]); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(config.Actions) > 0 {
|
||||
if err := addToTar(archive, stack.Name, "answers", marshalAnswers(config.Actions)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func marshalAnswers(answers map[string]string) string {
|
||||
buf := &bytes.Buffer{}
|
||||
for k, v := range answers {
|
||||
buf.WriteString(k)
|
||||
buf.WriteString("=")
|
||||
buf.WriteString(v)
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func addToTar(archive *tar.Writer, stackName, name string, stringContent string) error {
|
||||
if len(stringContent) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
f := filepath.Join(stackName, name)
|
||||
if archive == nil {
|
||||
err := os.MkdirAll(stackName, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Creating %s", f)
|
||||
return ioutil.WriteFile(f, []byte(stringContent), 0600)
|
||||
}
|
||||
|
||||
content := []byte(stringContent)
|
||||
err := archive.WriteHeader(&tar.Header{
|
||||
Name: f,
|
||||
Size: int64(len(content)),
|
||||
Mode: 0644,
|
||||
Uname: "root",
|
||||
Gname: "root",
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = archive.Write(content)
|
||||
return err
|
||||
}
|
||||
119
cmd/host.go
119
cmd/host.go
@@ -1,119 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func HostCommand() cli.Command {
|
||||
hostLsFlags := []cli.Flag{
|
||||
listAllFlag(),
|
||||
cli.BoolFlag{
|
||||
Name: "quiet,q",
|
||||
Usage: "Only display IDs",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "'json' or Custom format: '{{.ID}} {{.Host.Hostname}}'",
|
||||
},
|
||||
}
|
||||
|
||||
return cli.Command{
|
||||
Name: "hosts",
|
||||
ShortName: "host",
|
||||
Usage: "Operations on hosts",
|
||||
Action: defaultAction(hostLs),
|
||||
Flags: hostLsFlags,
|
||||
Subcommands: []cli.Command{
|
||||
cli.Command{
|
||||
Name: "ls",
|
||||
Usage: "List hosts",
|
||||
Description: "\nLists all hosts in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher hosts ls\n\t$ rancher --env 1a5 hosts ls\n",
|
||||
ArgsUsage: "None",
|
||||
Action: hostLs,
|
||||
Flags: hostLsFlags,
|
||||
},
|
||||
cli.Command{
|
||||
Name: "create",
|
||||
Usage: "Create a host",
|
||||
Description: "\nCreates a host in the $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher --env k8slab host create newHostName\n",
|
||||
ArgsUsage: "[NEWHOSTNAME...]",
|
||||
SkipFlagParsing: true,
|
||||
Action: hostCreate,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type HostsData struct {
|
||||
ID string
|
||||
Host client.Host
|
||||
State string
|
||||
ContainerCount int
|
||||
Labels string
|
||||
}
|
||||
|
||||
func getHostState(host *client.Host) string {
|
||||
state := host.State
|
||||
if state == "active" && host.AgentState != "" {
|
||||
state = host.AgentState
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
func getLabels(host *client.Host) string {
|
||||
var buffer bytes.Buffer
|
||||
it := 0
|
||||
for key, value := range host.Labels {
|
||||
if strings.HasPrefix(key, "io.rancher") {
|
||||
continue
|
||||
} else if it > 0 {
|
||||
buffer.WriteString(",")
|
||||
}
|
||||
|
||||
buffer.WriteString(key)
|
||||
buffer.WriteString("=")
|
||||
buffer.WriteString(value)
|
||||
it++
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
func hostLs(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
collection, err := c.Host.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writer := NewTableWriter([][]string{
|
||||
{"ID", "Host.Id"},
|
||||
{"HOSTNAME", "Host.Hostname"},
|
||||
{"STATE", "State"},
|
||||
{"CONTAINERS", "ContainerCount"},
|
||||
{"IP", "Host.AgentIpAddress"},
|
||||
{"LABELS", "Labels"},
|
||||
{"DETAIL", "Host.TransitioningMessage"},
|
||||
}, ctx)
|
||||
|
||||
defer writer.Close()
|
||||
|
||||
for _, item := range collection.Data {
|
||||
writer.Write(&HostsData{
|
||||
ID: item.Id,
|
||||
Host: item,
|
||||
State: getHostState(&item),
|
||||
ContainerCount: len(item.InstanceIds),
|
||||
Labels: getLabels(&item),
|
||||
})
|
||||
}
|
||||
|
||||
return writer.Err()
|
||||
}
|
||||
@@ -1,299 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"unicode"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func toEnv(name string) string {
|
||||
buf := bytes.Buffer{}
|
||||
for _, c := range name {
|
||||
if unicode.IsUpper(c) {
|
||||
buf.WriteRune('_')
|
||||
buf.WriteRune(unicode.ToLower(c))
|
||||
} else if c == '-' {
|
||||
buf.WriteRune('_')
|
||||
} else {
|
||||
buf.WriteRune(c)
|
||||
}
|
||||
}
|
||||
return strings.ToUpper(buf.String())
|
||||
}
|
||||
|
||||
func toAPI(name string) string {
|
||||
buf := bytes.Buffer{}
|
||||
upper := false
|
||||
for _, c := range name {
|
||||
if c == '-' {
|
||||
upper = true
|
||||
} else if upper {
|
||||
upper = false
|
||||
buf.WriteRune(unicode.ToUpper(c))
|
||||
} else {
|
||||
buf.WriteRune(c)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func toArg(name string) string {
|
||||
buf := bytes.Buffer{}
|
||||
for _, c := range name {
|
||||
if unicode.IsUpper(c) {
|
||||
buf.WriteRune('-')
|
||||
buf.WriteRune(unicode.ToLower(c))
|
||||
} else {
|
||||
buf.WriteRune(c)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func buildFlag(name string, field client.Field) cli.Flag {
|
||||
var flag cli.Flag
|
||||
switch field.Type {
|
||||
case "bool":
|
||||
flag = cli.BoolFlag{
|
||||
Name: toArg(name),
|
||||
EnvVar: toEnv(name),
|
||||
Usage: field.Description,
|
||||
}
|
||||
case "array[string]":
|
||||
fallthrough
|
||||
case "map[string]":
|
||||
flag = cli.StringSliceFlag{
|
||||
Name: toArg(name),
|
||||
EnvVar: toEnv(name),
|
||||
Usage: field.Description,
|
||||
}
|
||||
default:
|
||||
sflag := cli.StringFlag{
|
||||
Name: toArg(name),
|
||||
EnvVar: toEnv(name),
|
||||
Usage: field.Description,
|
||||
}
|
||||
flag = sflag
|
||||
if field.Default != nil {
|
||||
sflag.Value = fmt.Sprint(field.Default)
|
||||
}
|
||||
}
|
||||
|
||||
return flag
|
||||
}
|
||||
|
||||
func buildFlags(prefix string, schema client.Schema, schemas *client.Schemas) []cli.Flag {
|
||||
flags := []cli.Flag{}
|
||||
for name, field := range schema.ResourceFields {
|
||||
if !field.Create || name == "hostname" {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasSuffix(name, "Config") {
|
||||
subSchema := schemas.Schema(name)
|
||||
driver := strings.TrimSuffix(name, "Config")
|
||||
flags = append(flags, buildFlags(driver+"-", subSchema, schemas)...)
|
||||
} else {
|
||||
if prefix != "" {
|
||||
name = prefix + name
|
||||
}
|
||||
flags = append(flags, buildFlag(name, field))
|
||||
}
|
||||
}
|
||||
|
||||
return flags
|
||||
}
|
||||
|
||||
func hostCreate(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hostSchema := c.GetSchemas().Schema("host")
|
||||
flags := buildFlags("", hostSchema, c.GetSchemas())
|
||||
drivers := []string{}
|
||||
|
||||
for name := range hostSchema.ResourceFields {
|
||||
if strings.HasSuffix(name, "Config") {
|
||||
drivers = append(drivers, strings.TrimSuffix(name, "Config"))
|
||||
}
|
||||
}
|
||||
|
||||
hostCommand := HostCommand()
|
||||
|
||||
for i := range hostCommand.Subcommands {
|
||||
if hostCommand.Subcommands[i].Name == "create" {
|
||||
hostCommand.Subcommands[i].Flags = append(flags, cli.StringFlag{
|
||||
Name: "driver,d",
|
||||
Usage: "Driver to use: " + strings.Join(drivers, ", "),
|
||||
EnvVar: "MACHINE_DRIVER",
|
||||
})
|
||||
hostCommand.Subcommands[i].Action = func(ctx *cli.Context) error {
|
||||
return hostCreateRun(ctx, c, hostSchema, c.GetSchemas())
|
||||
}
|
||||
hostCommand.Subcommands[i].SkipFlagParsing = false
|
||||
}
|
||||
}
|
||||
|
||||
app := cli.NewApp()
|
||||
app.Flags = []cli.Flag{
|
||||
//TODO: remove duplication here
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Debug logging",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "config,c",
|
||||
Usage: "Client configuration file (default ${HOME}/.rancher/cli.json)",
|
||||
EnvVar: "RANCHER_CLIENT_CONFIG",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "environment,env",
|
||||
Usage: "Environment name or ID",
|
||||
EnvVar: "RANCHER_ENVIRONMENT",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "url",
|
||||
Usage: "Specify the Rancher API endpoint URL",
|
||||
EnvVar: "RANCHER_URL",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "access-key",
|
||||
Usage: "Specify Rancher API access key",
|
||||
EnvVar: "RANCHER_ACCESS_KEY",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "secret-key",
|
||||
Usage: "Specify Rancher API secret key",
|
||||
EnvVar: "RANCHER_SECRET_KEY",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "host",
|
||||
Usage: "Host used for docker command",
|
||||
EnvVar: "RANCHER_DOCKER_HOST",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "rancher-file,r",
|
||||
Usage: "Specify an alternate Rancher compose file (default: rancher-compose.yml)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "env-file,e",
|
||||
Usage: "Specify a file from which to read environment variables",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "file,f",
|
||||
Usage: "Specify one or more alternate compose files (default: docker-compose.yml)",
|
||||
Value: &cli.StringSlice{},
|
||||
EnvVar: "COMPOSE_FILE",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "stack,s",
|
||||
Usage: "Specify an alternate project name (default: directory name)",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "wait,w",
|
||||
Usage: "Wait for resource to reach resting state",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "wait-timeout",
|
||||
Usage: "Timeout in seconds to wait",
|
||||
Value: 600,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "wait-state",
|
||||
Usage: "State to wait for (active, healthy, etc)",
|
||||
},
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
hostCommand,
|
||||
}
|
||||
return app.Run(os.Args)
|
||||
}
|
||||
|
||||
func hostCreateRun(ctx *cli.Context, c *client.RancherClient, machineSchema client.Schema, schemas *client.Schemas) error {
|
||||
args := map[string]interface{}{}
|
||||
driverArgs := map[string]interface{}{}
|
||||
driver := ctx.String("driver")
|
||||
|
||||
if driver == "" {
|
||||
return fmt.Errorf("--driver is required")
|
||||
}
|
||||
|
||||
driverSchema, ok := schemas.CheckSchema(driver + "Config")
|
||||
if !ok {
|
||||
return fmt.Errorf("Invalid driver: %s", driver)
|
||||
}
|
||||
|
||||
for _, name := range ctx.FlagNames() {
|
||||
schema := machineSchema
|
||||
destArgs := args
|
||||
key := name
|
||||
value := ctx.Generic(name)
|
||||
|
||||
// really dumb way to detect empty values
|
||||
if str := fmt.Sprint(value); str == "" || str == "[]" {
|
||||
continue
|
||||
}
|
||||
|
||||
key = toAPI(strings.TrimPrefix(name, driver+"-"))
|
||||
|
||||
if strings.HasPrefix(name, driver+"-") {
|
||||
schema = driverSchema
|
||||
destArgs = driverArgs
|
||||
}
|
||||
|
||||
fieldType := schema.ResourceFields[key].Type
|
||||
if fieldType == "map[string]" {
|
||||
mapValue := map[string]string{}
|
||||
for _, val := range ctx.StringSlice(name) {
|
||||
parts := strings.SplitN(val, "=", 2)
|
||||
if len(parts) == 1 {
|
||||
mapValue[parts[0]] = ""
|
||||
} else {
|
||||
mapValue[parts[0]] = parts[1]
|
||||
}
|
||||
}
|
||||
value = mapValue
|
||||
}
|
||||
|
||||
destArgs[key] = value
|
||||
}
|
||||
|
||||
args[driver+"Config"] = driverArgs
|
||||
|
||||
names := ctx.Args()
|
||||
if len(names) == 0 {
|
||||
names = []string{RandomName()}
|
||||
}
|
||||
|
||||
w, err := NewWaiter(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
for _, name := range names {
|
||||
args["hostname"] = name
|
||||
var host client.Host
|
||||
if err := c.Create("host", args, &host); err != nil {
|
||||
lastErr = err
|
||||
logrus.Error(err)
|
||||
} else {
|
||||
w.Add(host.Id)
|
||||
}
|
||||
}
|
||||
|
||||
if lastErr != nil {
|
||||
return lastErr
|
||||
}
|
||||
|
||||
return w.Wait()
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
inspectTypes = []string{"service", "container", "host", "project", "stack", "volume", "secret"}
|
||||
)
|
||||
|
||||
func InspectCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "inspect",
|
||||
Usage: "View details for " + replaceTypeNames(strings.Join(inspectTypes, ", ")),
|
||||
Description: `
|
||||
Inspect resources by ID or name in the current $RANCHER_ENVIRONMENT. Use '--env <envID>' or '--env <envName>' to select a different environment.
|
||||
|
||||
Example:
|
||||
$ rancher inspect 1s70
|
||||
`,
|
||||
ArgsUsage: "[ID NAME...]",
|
||||
Action: inspectResources,
|
||||
Flags: []cli.Flag{
|
||||
typesStringFlag(stopTypes),
|
||||
cli.BoolFlag{
|
||||
Name: "links",
|
||||
Usage: "Include URLs to actions and links in resource output",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "'json' or Custom format: '{{.kind}}'",
|
||||
Value: "json",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func inspectResources(ctx *cli.Context) error {
|
||||
writer := NewTableWriter(nil, ctx)
|
||||
forEachResource(ctx, inspectTypes, func(c *client.RancherClient, resource *client.Resource) (string, error) {
|
||||
mapResource := map[string]interface{}{}
|
||||
err := c.ById(resource.Type, resource.Id, &mapResource)
|
||||
if err != nil {
|
||||
return "-", err
|
||||
}
|
||||
if !ctx.Bool("links") {
|
||||
delete(mapResource, "links")
|
||||
delete(mapResource, "actions")
|
||||
}
|
||||
writer.Write(mapResource)
|
||||
return "-", nil
|
||||
})
|
||||
return writer.Err()
|
||||
}
|
||||
126
cmd/kubectl.go
126
cmd/kubectl.go
@@ -2,136 +2,40 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func KubectlCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "kubectl",
|
||||
Usage: "Run Kubectl on a k8s cluster in rancher",
|
||||
Description: "\nRun Kubectl on rancher cluster. Example: 'rancher kubectl get pod'\nTo specify a cluster, run `rancher --cluster 1c1 kubectl get pod`\n",
|
||||
Action: kubectl,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "help-kubectl",
|
||||
Usage: "Display the 'kubectl --help'",
|
||||
},
|
||||
},
|
||||
Name: "kubectl",
|
||||
Usage: "Run kubectl commands",
|
||||
Description: "Use the current kubectl context to run commands",
|
||||
Action: runKubectl,
|
||||
SkipFlagParsing: true,
|
||||
}
|
||||
}
|
||||
|
||||
func kubectl(ctx *cli.Context) error {
|
||||
return processExitCode(doKubectl(ctx))
|
||||
}
|
||||
|
||||
func doKubectl(ctx *cli.Context) error {
|
||||
c, err := GetRawClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func runKubectl(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) > 0 && (args[0] == "-h" || args[0] == "--help") {
|
||||
return cli.ShowCommandHelp(ctx, "kubectl")
|
||||
}
|
||||
|
||||
if len(args) > 0 && args[0] == "--help-kubectl" {
|
||||
return runKubectlHelp("")
|
||||
path, err := exec.LookPath("kubectl")
|
||||
if nil != err {
|
||||
return fmt.Errorf("kubectl is required to use this command: %s", err.Error())
|
||||
}
|
||||
|
||||
clusterID := ""
|
||||
clusterName := ctx.GlobalString("cluster")
|
||||
if clusterName != "" {
|
||||
cluster, err := Lookup(c, clusterName, "cluster")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterID = cluster.Id
|
||||
}
|
||||
|
||||
config, err := lookupConfig(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
env, err := c.Project.ById(config.Environment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if clusterID == "" {
|
||||
clusterID = env.ClusterId
|
||||
}
|
||||
|
||||
baseURL, err := baseURL(config.URL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serverAddress := fmt.Sprintf("%s/k8s/clusters/%s", baseURL, clusterID)
|
||||
|
||||
configTemplate := `apiVersion: v1
|
||||
kind: Config
|
||||
clusters:
|
||||
- cluster:
|
||||
api-version: v1
|
||||
insecure-skip-tls-verify: true
|
||||
server: "%s"
|
||||
name: "%s"
|
||||
contexts:
|
||||
- context:
|
||||
cluster: "%s"
|
||||
user: "%s"
|
||||
name: "%s"
|
||||
current-context: "%s"
|
||||
users:
|
||||
- name: "%s"
|
||||
user:
|
||||
username: "%s"
|
||||
password: "%s"`
|
||||
|
||||
kubeConfig := fmt.Sprintf(configTemplate, serverAddress, env.Name, env.Name, env.Name, env.Name, env.Name, env.Name, config.AccessKey, config.SecretKey)
|
||||
|
||||
tempfile, err := ioutil.TempFile("", "kube-config")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tempfile.Name())
|
||||
|
||||
_, err = tempfile.Write([]byte(kubeConfig))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tempfile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
filePath := tempfile.Name()
|
||||
commandArgs := append([]string{"--kubeconfig", filePath}, ctx.Args()...)
|
||||
command := exec.Command("kubectl", commandArgs...)
|
||||
command.Env = os.Environ()
|
||||
command.Stdin = os.Stdin
|
||||
command.Stdout = os.Stdout
|
||||
command.Stderr = os.Stderr
|
||||
signal.Ignore(os.Interrupt)
|
||||
return command.Run()
|
||||
}
|
||||
|
||||
func runKubectlHelp(subcommand string) error {
|
||||
args := []string{"--help"}
|
||||
if subcommand != "" {
|
||||
args = []string{subcommand, "--help"}
|
||||
}
|
||||
cmd := exec.Command("kubectl", args...)
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd := exec.Command(path, ctx.Args()...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
cmd.Stdin = os.Stdin
|
||||
err = cmd.Run()
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
149
cmd/login.go
Normal file
149
cmd/login.go
Normal file
@@ -0,0 +1,149 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/cli/cliclient"
|
||||
"github.com/rancher/cli/config"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func LoginCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "login",
|
||||
Aliases: []string{"l"},
|
||||
Usage: "Login to a Rancher server",
|
||||
Action: loginSetup,
|
||||
ArgsUsage: "None",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "token,t",
|
||||
Usage: "Token from the Rancher UI",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cacert",
|
||||
Usage: "Location of the CACerts to use",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
Usage: "Name of the Server",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func loginSetup(ctx *cli.Context) error {
|
||||
path := ctx.GlobalString("cf")
|
||||
if path == "" {
|
||||
path = os.ExpandEnv("${HOME}/.rancher/cli.json")
|
||||
}
|
||||
|
||||
cf, err := loadConfig(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serverName := ctx.String("name")
|
||||
if serverName == "" {
|
||||
serverName = RandomName()
|
||||
}
|
||||
|
||||
serverConfig, ok := cf.Servers[serverName]
|
||||
if !ok {
|
||||
serverConfig = &config.ServerConfig{}
|
||||
}
|
||||
|
||||
if ctx.NArg() == 0 || ctx.NArg() > 1 {
|
||||
return errors.New("one server is required")
|
||||
}
|
||||
serverConfig.URL = ctx.Args().First()
|
||||
|
||||
if ctx.String("token") != "" {
|
||||
auth := SplitOnColon(ctx.String("token"))
|
||||
if len(auth) != 2 {
|
||||
return errors.New("invalid token")
|
||||
}
|
||||
serverConfig.AccessKey = auth[0]
|
||||
serverConfig.SecretKey = auth[1]
|
||||
serverConfig.TokenKey = ctx.String("token")
|
||||
} else {
|
||||
// This can be removed once username and password is accepted
|
||||
return errors.New("token flag is required")
|
||||
}
|
||||
|
||||
if ctx.String("cacert") != "" {
|
||||
cert, err := loadAndVerifyCert(ctx.String("cacert"))
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
serverConfig.CACerts = cert
|
||||
|
||||
}
|
||||
|
||||
proj, err := getDefaultProject(ctx, serverConfig)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the default server and proj for the user
|
||||
serverConfig.Project = proj
|
||||
cf.CurrentServer = serverName
|
||||
cf.Servers[serverName] = serverConfig
|
||||
|
||||
cf.Write()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDefaultProject(ctx *cli.Context, cf *config.ServerConfig) (string, error) {
|
||||
// Set this on global as it's an arg to login and will be needed for clients
|
||||
|
||||
mc, err := cliclient.NewMasterClient(cf)
|
||||
if nil != err {
|
||||
return "", err
|
||||
}
|
||||
|
||||
collection, err := mc.ManagementClient.Project.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
errMessage := fmt.Sprintf("invalid input, enter a number between 0 and %v", len(collection.Data)-1)
|
||||
|
||||
fmt.Println("Select your default Project:")
|
||||
for i, project := range collection.Data {
|
||||
fmt.Printf("%v %v %v\n", i, project.ID, project.Name)
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
var selection int
|
||||
|
||||
for {
|
||||
input, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
input = strings.TrimSpace(input)
|
||||
|
||||
if input != "" {
|
||||
i, err := strconv.Atoi(input)
|
||||
if nil != err {
|
||||
fmt.Println(errMessage)
|
||||
continue
|
||||
}
|
||||
if i <= len(collection.Data)-1 {
|
||||
selection = i
|
||||
break
|
||||
}
|
||||
fmt.Println(errMessage)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return collection.Data[selection].ID, nil
|
||||
}
|
||||
362
cmd/logs.go
362
cmd/logs.go
@@ -1,362 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types"
|
||||
dclient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/docker/libcompose/cli/logger"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/rancher/cli/monitor"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/rancher/rancher-docker-api-proxy"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var loggerFactory = logger.NewColorLoggerFactory()
|
||||
|
||||
func LogsCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "logs",
|
||||
Usage: "Fetch the logs of a container",
|
||||
Description: "\nExample:\n\t$ rancher logs web\n",
|
||||
ArgsUsage: "[CONTAINERNAME CONTAINERID...] or [SERVICENAME SERVICEID...]",
|
||||
Action: logsCommand,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "service,s",
|
||||
Usage: "Show service logs",
|
||||
},
|
||||
cli.BoolTFlag{
|
||||
Name: "sub-log",
|
||||
Usage: "Show service sub logs",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "follow,f",
|
||||
Usage: "Follow log output",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "tail",
|
||||
Value: 100,
|
||||
Usage: "Number of lines to show from the end of the logs",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "since",
|
||||
Usage: "Show logs since timestamp",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "timestamps,t",
|
||||
Usage: "Show timestamps",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func printPastLogs(c *client.RancherClient, nameCache map[string]string, services map[string]bool, ctx *cli.Context) (map[string]bool, error) {
|
||||
printed := map[string]bool{}
|
||||
|
||||
listOpts := defaultListOpts(nil)
|
||||
listOpts.Filters["sort"] = "id"
|
||||
listOpts.Filters["order"] = "desc"
|
||||
if !ctx.Bool("sub-log") {
|
||||
listOpts.Filters["subLog"] = "0"
|
||||
}
|
||||
|
||||
limit := ctx.Int("tail")
|
||||
if limit == 0 {
|
||||
return printed, nil
|
||||
}
|
||||
|
||||
if limit > 0 {
|
||||
listOpts.Filters["limit"] = limit
|
||||
}
|
||||
|
||||
logs, err := c.ServiceLog.List(listOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := len(logs.Data); i > 0; i-- {
|
||||
l := logs.Data[i-1]
|
||||
printed[l.Id] = true
|
||||
printServiceLog(c, nameCache, services, l)
|
||||
}
|
||||
|
||||
return printed, nil
|
||||
}
|
||||
|
||||
func printServiceLog(c *client.RancherClient, nameCache map[string]string, services map[string]bool, log client.ServiceLog) {
|
||||
if len(services) > 0 && !services[log.ServiceId] {
|
||||
return
|
||||
}
|
||||
|
||||
created, _ := time.Parse(time.RFC3339, log.Created)
|
||||
endTime, _ := time.Parse(time.RFC3339, log.EndTime)
|
||||
duration := endTime.Sub(created)
|
||||
durationStr := duration.String()
|
||||
if durationStr == "0" || strings.HasPrefix(durationStr, "-") {
|
||||
durationStr = "-"
|
||||
}
|
||||
if log.EndTime == "" {
|
||||
durationStr = "?"
|
||||
}
|
||||
if log.InstanceId == "" {
|
||||
log.InstanceId = "-"
|
||||
}
|
||||
|
||||
if nameCache[log.ServiceId] == "" {
|
||||
service, err := c.Service.ById(log.ServiceId)
|
||||
if nameCache[service.StackId] == "" {
|
||||
stack, err := c.Stack.ById(service.StackId)
|
||||
if err == nil {
|
||||
nameCache[service.StackId] = stack.Name
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
nameCache[log.ServiceId] = service.Name
|
||||
}
|
||||
nameCache[log.ServiceId] = fmt.Sprintf("%s/%s(%s)", nameCache[service.StackId], nameCache[log.ServiceId], log.ServiceId)
|
||||
}
|
||||
|
||||
fmt.Printf("%s %4s %s %s %s %6s %s: %s\n", log.Created, durationStr, strings.SplitN(log.TransactionId, "-", 2)[0],
|
||||
strings.ToUpper(log.Level), nameCache[log.ServiceId], log.InstanceId, log.EventType, log.Description)
|
||||
}
|
||||
|
||||
func serviceLogs(c *client.RancherClient, ctx *cli.Context) error {
|
||||
nameCache := map[string]string{}
|
||||
var sub *monitor.Subscription
|
||||
follow := ctx.Bool("follow")
|
||||
|
||||
if follow {
|
||||
m := monitor.New(c)
|
||||
sub = m.Subscribe()
|
||||
go func() {
|
||||
logrus.Fatal(m.Start())
|
||||
}()
|
||||
}
|
||||
|
||||
services, err := resolveServices(c, ctx.Args())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printed, err := printPastLogs(c, nameCache, services, ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if follow {
|
||||
for event := range sub.C {
|
||||
if event.ResourceType != "serviceLog" {
|
||||
continue
|
||||
}
|
||||
if printed[event.ResourceID] {
|
||||
continue
|
||||
}
|
||||
var log client.ServiceLog
|
||||
err := mapstructure.Decode(event.Data["resource"], &log)
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to convert %#v: %v", event.Data["resource"], err)
|
||||
}
|
||||
printServiceLog(c, nameCache, services, log)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func logsCommand(ctx *cli.Context) error {
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ctx.Bool("service") {
|
||||
return serviceLogs(c, ctx)
|
||||
}
|
||||
|
||||
if len(ctx.Args()) == 0 {
|
||||
return fmt.Errorf("Please pass a container name")
|
||||
}
|
||||
|
||||
instances, err := resolveContainers(c, ctx.Args())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
listenSocks := map[string]*dclient.Client{}
|
||||
for _, i := range instances {
|
||||
if i.ExternalId == "" || i.HostId == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if dockerClient, ok := listenSocks[i.HostId]; ok {
|
||||
wg.Add(1)
|
||||
go func(dockerClient *dclient.Client, i client.Instance) {
|
||||
doLog(len(instances) <= 1, ctx, i, dockerClient)
|
||||
wg.Done()
|
||||
}(dockerClient, i)
|
||||
continue
|
||||
}
|
||||
|
||||
resource, err := Lookup(c, i.HostId, "host")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host, err := c.Host.ById(resource.Id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state := getHostState(host)
|
||||
if state != "active" && state != "inactive" {
|
||||
logrus.Errorf("Can not contact host %s in state %s", i.HostId, state)
|
||||
continue
|
||||
}
|
||||
|
||||
tempfile, err := ioutil.TempFile("", "docker-sock")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tempfile.Name())
|
||||
|
||||
if err := tempfile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dockerHost := "unix://" + tempfile.Name()
|
||||
proxy := dockerapiproxy.NewProxy(c, host.Id, dockerHost)
|
||||
if err := proxy.Listen(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
logrus.Fatal(proxy.Serve())
|
||||
}()
|
||||
|
||||
dockerClient, err := dclient.NewClient(dockerHost, "", nil, nil)
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to connect to host %s: %v", i.HostId, err)
|
||||
continue
|
||||
}
|
||||
|
||||
listenSocks[i.HostId] = dockerClient
|
||||
|
||||
wg.Add(1)
|
||||
go func(dockerClient *dclient.Client, i client.Instance) {
|
||||
doLog(len(instances) <= 1, ctx, i, dockerClient)
|
||||
wg.Done()
|
||||
}(dockerClient, i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func doLog(single bool, ctx *cli.Context, instance client.Instance, dockerClient *dclient.Client) error {
|
||||
c, err := dockerClient.ContainerInspect(context.Background(), instance.ExternalId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := types.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Since: ctx.String("since"),
|
||||
Timestamps: ctx.Bool("timestamps"),
|
||||
Follow: ctx.Bool("follow"),
|
||||
Tail: ctx.String("tail"),
|
||||
//Details: ctx.Bool("details"),
|
||||
}
|
||||
responseBody, err := dockerClient.ContainerLogs(context.Background(), c.ID, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer responseBody.Close()
|
||||
|
||||
if c.Config.Tty {
|
||||
_, err = io.Copy(os.Stdout, responseBody)
|
||||
} else if single {
|
||||
_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, responseBody)
|
||||
} else {
|
||||
l := loggerFactory.CreateContainerLogger(instance.Name)
|
||||
_, err = stdcopy.StdCopy(writerFunc(l.Out), writerFunc(l.Err), responseBody)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// writerFunc adapts a plain byte-sink callback into an io.Writer.
type writerFunc func(p []byte)

// Write invokes the wrapped callback and reports the whole slice as
// written; it never returns an error.
func (w writerFunc) Write(p []byte) (int, error) {
	w(p)
	return len(p), nil
}
|
||||
|
||||
func resolveServices(c *client.RancherClient, names []string) (map[string]bool, error) {
|
||||
services := map[string]bool{}
|
||||
for _, name := range names {
|
||||
resource, err := Lookup(c, name, "service")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
services[resource.Id] = true
|
||||
}
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func resolveContainers(c *client.RancherClient, names []string) ([]client.Instance, error) {
|
||||
result := []client.Instance{}
|
||||
|
||||
for _, name := range names {
|
||||
resource, err := Lookup(c, name, "container", "service")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resource.Type == "container" {
|
||||
i, err := c.Instance.ById(resource.Id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, *i)
|
||||
} else if resource.Type == "environment" {
|
||||
services := client.ServiceCollection{}
|
||||
err := c.GetLink(*resource, "services", &services)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serviceIds := []string{}
|
||||
for _, s := range services.Data {
|
||||
serviceIds = append(serviceIds, s.Id)
|
||||
}
|
||||
instances, err := resolveContainers(c, serviceIds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, instances...)
|
||||
} else {
|
||||
instances := client.InstanceCollection{}
|
||||
err := c.GetLink(*resource, "instances", &instances)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, instance := range instances.Data {
|
||||
result = append(result, instance)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
98
cmd/project.go
Normal file
98
cmd/project.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
managementClient "github.com/rancher/types/client/management/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
type ProjectData struct {
|
||||
Project managementClient.Project
|
||||
}
|
||||
|
||||
func ProjectCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "projects",
|
||||
Aliases: []string{"project"},
|
||||
Usage: "Operations on projects",
|
||||
Action: defaultAction(projectLs),
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "ls",
|
||||
Usage: "List projects",
|
||||
Description: "\nLists all projects in the current cluster.",
|
||||
ArgsUsage: "None",
|
||||
Action: projectLs,
|
||||
},
|
||||
{
|
||||
Name: "create",
|
||||
Usage: "Create a project",
|
||||
Description: "\nCreates a project in the current cluster.",
|
||||
ArgsUsage: "[NEWPROJECTNAME...]",
|
||||
Action: projectCreate,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func projectLs(ctx *cli.Context) error {
|
||||
collection, err := getProjectList(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writer := NewTableWriter([][]string{
|
||||
{"ID", "Project.ID"},
|
||||
{"NAME", "Project.Name"},
|
||||
{"STATE", "Project.State"},
|
||||
}, ctx)
|
||||
|
||||
defer writer.Close()
|
||||
|
||||
for _, item := range collection.Data {
|
||||
writer.Write(&ProjectData{
|
||||
Project: item,
|
||||
})
|
||||
}
|
||||
|
||||
return writer.Err()
|
||||
}
|
||||
|
||||
func projectCreate(ctx *cli.Context) error {
|
||||
config, err := lookupConfig(ctx)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ctx.NArg() == 0 {
|
||||
return errors.New("name is required")
|
||||
}
|
||||
|
||||
name := ctx.Args().First()
|
||||
newProj := &managementClient.Project{
|
||||
Name: name,
|
||||
ClusterId: strings.Split(config.Project, ":")[0],
|
||||
}
|
||||
|
||||
c.ManagementClient.Project.Create(newProj)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getProjectList(ctx *cli.Context) (*managementClient.ProjectCollection, error) {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
collection, err := c.ManagementClient.Project.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return collection, nil
|
||||
}
|
||||
@@ -3,8 +3,9 @@ package cmd
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/c-bata/go-prompt"
|
||||
"github.com/rancher/cli/rancher_prompt"
|
||||
|
||||
"github.com/c-bata/go-prompt"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
|
||||
280
cmd/ps.go
280
cmd/ps.go
@@ -2,253 +2,83 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
projectClient "github.com/rancher/types/client/project/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
type WorkLoadPS struct {
|
||||
WorkLoad projectClient.Workload
|
||||
Name string // this is built from namespace/name
|
||||
}
|
||||
|
||||
type PodPS struct {
|
||||
Pod projectClient.Pod
|
||||
Name string // this is built from namespace/name
|
||||
}
|
||||
|
||||
func PsCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "ps",
|
||||
Usage: "Show services/containers",
|
||||
Description: "\nLists all services or containers in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher ps\n\t$ rancher ps -c\n\t$ rancher --env 1a5 ps\n",
|
||||
ArgsUsage: "None",
|
||||
Action: servicePs,
|
||||
Flags: []cli.Flag{
|
||||
listAllFlag(),
|
||||
listSystemFlag(),
|
||||
cli.BoolFlag{
|
||||
Name: "containers,c",
|
||||
Usage: "Display containers",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "quiet,q",
|
||||
Usage: "Only display IDs",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "'json' or Custom format: '{{.Service.Id}} {{.Service.Name}} {{.Service.LaunchConfig.ImageUuid}}'",
|
||||
},
|
||||
},
|
||||
Usage: "Show workloads and pods",
|
||||
Description: "Prints out a table of pods not associated with a workload then a table of workloads",
|
||||
Action: psLs,
|
||||
}
|
||||
}
|
||||
|
||||
func GetStackMap(c *client.RancherClient) map[string]client.Stack {
|
||||
result := map[string]client.Stack{}
|
||||
stacks, err := c.Stack.List(baseListOpts())
|
||||
if err != nil {
|
||||
return result
|
||||
}
|
||||
|
||||
for _, stack := range stacks.Data {
|
||||
result[stack.Id] = stack
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
type PsData struct {
|
||||
Service interface{}
|
||||
Name string
|
||||
LaunchConfig interface{}
|
||||
Stack client.Stack
|
||||
CombinedState string
|
||||
ID string
|
||||
}
|
||||
|
||||
type ContainerPsData struct {
|
||||
ID string
|
||||
Container client.Container
|
||||
CombinedState string
|
||||
DockerID string
|
||||
}
|
||||
|
||||
func servicePs(ctx *cli.Context) error {
|
||||
func psLs(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ctx.Bool("containers") {
|
||||
return hostContainerPs(ctx, c)
|
||||
workLoads, err := c.ProjectClient.Workload.List(defaultListOpts(ctx))
|
||||
if nil != err {
|
||||
fmt.Println("HERE")
|
||||
return err
|
||||
}
|
||||
|
||||
if len(ctx.Args()) > 0 {
|
||||
return serviceContainersPs(ctx, c, ctx.Args())
|
||||
}
|
||||
|
||||
stackMap := GetStackMap(c)
|
||||
|
||||
collection, err := c.Service.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "service list failed")
|
||||
}
|
||||
|
||||
collectionContainers, err := c.Container.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "container list failed")
|
||||
}
|
||||
|
||||
writer := NewTableWriter([][]string{
|
||||
{"ID", "Service.Id"},
|
||||
{"TYPE", "Service.Type"},
|
||||
wlWriter := NewTableWriter([][]string{
|
||||
{"NAME", "Name"},
|
||||
{"IMAGE", "LaunchConfig.ImageUuid"},
|
||||
{"STATE", "CombinedState"},
|
||||
{"SCALE", "{{len .Service.InstanceIds}}/{{.Service.Scale}}"},
|
||||
{"ENDPOINTS", "{{range .Service.PublicEndpoints}}{{.AgentIpAddress}}:{{.PublicPort}}:{{.PrivatePort}}/{{.Protocol}} {{end}}"},
|
||||
{"DETAIL", "Service.TransitioningMessage"},
|
||||
{"STATE", "WorkLoad.State"},
|
||||
{"SCALE", "WorkLoad.Scale"},
|
||||
{"DETAIL", "WorkLoad.TransitioningMessage"},
|
||||
}, ctx)
|
||||
|
||||
defer writer.Close()
|
||||
defer wlWriter.Close()
|
||||
|
||||
for _, item := range collection.Data {
|
||||
if item.LaunchConfig != nil {
|
||||
item.LaunchConfig.ImageUuid = strings.TrimPrefix(item.LaunchConfig.ImageUuid, "docker:")
|
||||
}
|
||||
item.Type = "service"
|
||||
|
||||
combined := item.HealthState
|
||||
if item.State != "active" || combined == "" {
|
||||
combined = item.State
|
||||
}
|
||||
if item.LaunchConfig == nil {
|
||||
item.LaunchConfig = &client.LaunchConfig{}
|
||||
}
|
||||
writer.Write(PsData{
|
||||
ID: item.Id,
|
||||
Service: item,
|
||||
Name: fmt.Sprintf("%s/%s", stackMap[item.StackId].Name, item.Name),
|
||||
LaunchConfig: *item.LaunchConfig,
|
||||
Stack: stackMap[item.StackId],
|
||||
CombinedState: combined,
|
||||
})
|
||||
for _, sidekick := range item.SecondaryLaunchConfigs {
|
||||
sidekick.ImageUuid = strings.TrimPrefix(sidekick.ImageUuid, "docker:")
|
||||
item.Type = "sidekick"
|
||||
writer.Write(PsData{
|
||||
ID: item.Id,
|
||||
Service: item,
|
||||
Name: fmt.Sprintf("%s/%s/%s", stackMap[item.StackId].Name, item.Name,
|
||||
sidekick.Name),
|
||||
LaunchConfig: sidekick,
|
||||
Stack: stackMap[item.StackId],
|
||||
CombinedState: combined,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for _, item := range collectionContainers.Data {
|
||||
if len(item.ServiceIds) == 0 && item.StackId != "" {
|
||||
launchConfig := client.LaunchConfig{}
|
||||
launchConfig.ImageUuid = item.Image
|
||||
|
||||
service := client.Service{}
|
||||
service.Id = item.Id
|
||||
service.Type = "standalone"
|
||||
service.InstanceIds = []string{item.Id}
|
||||
service.Scale = 1
|
||||
service.PublicEndpoints = item.PublicEndpoints
|
||||
service.TransitioningMessage = item.TransitioningMessage
|
||||
|
||||
combined := item.HealthState
|
||||
if item.State != "active" || combined == "" {
|
||||
combined = item.State
|
||||
}
|
||||
writer.Write(PsData{
|
||||
ID: item.Id,
|
||||
Service: service,
|
||||
Name: fmt.Sprintf("%s/%s", stackMap[item.StackId].Name, item.Name),
|
||||
LaunchConfig: launchConfig,
|
||||
Stack: stackMap[item.StackId],
|
||||
CombinedState: combined,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return writer.Err()
|
||||
}
|
||||
|
||||
func serviceContainersPs(ctx *cli.Context, c *client.RancherClient, names []string) error {
|
||||
containerList := []client.Container{}
|
||||
|
||||
for _, name := range names {
|
||||
service, err := Lookup(c, name, "service")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var containers client.ContainerCollection
|
||||
if err := c.GetLink(*service, "instances", &containers); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
containerList = append(containerList, containers.Data...)
|
||||
}
|
||||
|
||||
return containerPs(ctx, containerList)
|
||||
}
|
||||
|
||||
func hostContainerPs(ctx *cli.Context, c *client.RancherClient) error {
|
||||
if len(ctx.Args()) == 0 {
|
||||
containerList, err := c.Container.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return containerPs(ctx, containerList.Data)
|
||||
}
|
||||
|
||||
containers := []client.Container{}
|
||||
for _, hostname := range ctx.Args() {
|
||||
host, err := Lookup(c, hostname, "host")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var containerList client.ContainerCollection
|
||||
if err := c.GetLink(*host, "instances", &containerList); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
containers = append(containers, containerList.Data...)
|
||||
}
|
||||
|
||||
return containerPs(ctx, containers)
|
||||
}
|
||||
|
||||
func containerPs(ctx *cli.Context, containers []client.Container) error {
|
||||
writer := NewTableWriter([][]string{
|
||||
{"ID", "ID"},
|
||||
{"NAME", "Container.Name"},
|
||||
{"IMAGE", "Container.ImageUuid"},
|
||||
{"STATE", "CombinedState"},
|
||||
{"HOST", "Container.HostId"},
|
||||
{"IP", "Container.PrimaryIpAddress"},
|
||||
{"DOCKER_ID", "DockerID"},
|
||||
{"DETAIL", "Container.TransitioningMessage"},
|
||||
//TODO: {"PORTS", "{{ports .Container.Ports}}"},
|
||||
}, ctx)
|
||||
defer writer.Close()
|
||||
|
||||
for _, container := range containers {
|
||||
container.ImageUuid = strings.TrimPrefix(container.ImageUuid, "docker:")
|
||||
combined := container.HealthState
|
||||
if container.State != "running" || combined == "" {
|
||||
combined = container.State
|
||||
}
|
||||
containerID := container.ExternalId
|
||||
if len(containerID) > 12 {
|
||||
containerID = containerID[:12]
|
||||
}
|
||||
writer.Write(ContainerPsData{
|
||||
Container: container,
|
||||
ID: container.Id,
|
||||
DockerID: containerID,
|
||||
CombinedState: combined,
|
||||
for _, item := range workLoads.Data {
|
||||
wlWriter.Write(&WorkLoadPS{
|
||||
WorkLoad: item,
|
||||
Name: fmt.Sprintf("%s/%s", item.NamespaceId, item.Name),
|
||||
})
|
||||
}
|
||||
|
||||
return writer.Err()
|
||||
// Add an empty line to the stack to separate the tables
|
||||
defer fmt.Println("")
|
||||
|
||||
opts := defaultListOpts(ctx)
|
||||
opts.Filters["workloadId"] = ""
|
||||
|
||||
orphanPods, err := c.ProjectClient.Pod.List(opts)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
podWriter := NewTableWriter([][]string{
|
||||
{"NAME", "Name"},
|
||||
{"STATE", "Pod.State"},
|
||||
{"DETAIL", "Pod.TransitioningMessage"},
|
||||
}, ctx)
|
||||
|
||||
defer podWriter.Close()
|
||||
|
||||
for _, item := range orphanPods.Data {
|
||||
podWriter.Write(&PodPS{
|
||||
Pod: item,
|
||||
Name: fmt.Sprintf("%s/%s", item.NamespaceId, item.Name),
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
72
cmd/pull.go
72
cmd/pull.go
@@ -1,72 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/fatih/color"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
"time"
|
||||
)
|
||||
|
||||
func PullCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "pull",
|
||||
Usage: "Pull images on hosts that are in the current environment. Examples: rancher pull ubuntu",
|
||||
Action: pullImages,
|
||||
Subcommands: []cli.Command{},
|
||||
Flags: []cli.Flag{
|
||||
cli.StringSliceFlag{
|
||||
Name: "hosts",
|
||||
Usage: "Specify which host should pull images. By default it will pull images on all the hosts in the current environment. Examples: rancher pull --hosts 1h1 --hosts 1h2 ubuntu",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func pullImages(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.NArg() == 0 {
|
||||
return cli.ShowCommandHelp(ctx, "")
|
||||
}
|
||||
image := ctx.Args()[0]
|
||||
|
||||
hosts := ctx.StringSlice("hosts")
|
||||
if len(hosts) == 0 {
|
||||
hts, err := c.Host.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ht := range hts.Data {
|
||||
hosts = append(hosts, ht.Id)
|
||||
}
|
||||
}
|
||||
pullTask := client.PullTask{
|
||||
Mode: "all",
|
||||
Image: image,
|
||||
HostIds: hosts,
|
||||
}
|
||||
task, err := c.PullTask.Create(&pullTask)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cl := getRandomColor()
|
||||
lastMsg := ""
|
||||
for {
|
||||
if task.Transitioning != "yes" {
|
||||
fmt.Printf("Finished pulling image %s\n", image)
|
||||
return nil
|
||||
}
|
||||
time.Sleep(150 * time.Millisecond)
|
||||
if task.TransitioningMessage != lastMsg {
|
||||
color.New(cl).Printf("Pulling image. Status: %s\n", task.TransitioningMessage)
|
||||
lastMsg = task.TransitioningMessage
|
||||
}
|
||||
task, err = c.PullTask.ById(task.Id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/rancher/go-rancher/catalog"
|
||||
)
|
||||
|
||||
func askQuestions(answers map[string]interface{}, templateVersion catalog.TemplateVersion) (map[string]interface{}, error) {
|
||||
result := map[string]interface{}{}
|
||||
for _, q := range templateVersion.Questions {
|
||||
question := catalog.Question{}
|
||||
err := mapstructure.Decode(q, &question)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if answer, ok := answers[question.Variable]; ok {
|
||||
result[question.Variable] = answer
|
||||
} else {
|
||||
result[question.Variable] = askQuestion(question)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func askQuestion(question catalog.Question) interface{} {
|
||||
if len(question.Description) > 0 {
|
||||
fmt.Println(question.Description)
|
||||
}
|
||||
fmt.Printf("%s %s[%s]: ", question.Label, question.Variable, question.Default)
|
||||
|
||||
answer, err := bufio.NewReader(os.Stdin).ReadString('\n')
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
answer = strings.TrimSpace(answer)
|
||||
if answer == "" {
|
||||
answer = question.Default
|
||||
}
|
||||
|
||||
return answer
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// restartTypes lists the resource types `rancher restart` accepts.
var restartTypes = []string{"service", "container"}
|
||||
|
||||
func RestartCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "restart",
|
||||
Usage: "Restart " + strings.Join(restartTypes, ", "),
|
||||
Description: "\nRestart resources by ID or name in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher restart 1s70\n\t$ rancher --env 1a5 restart stackName/serviceName \n",
|
||||
ArgsUsage: "[ID NAME...]",
|
||||
Action: restartResources,
|
||||
Flags: []cli.Flag{
|
||||
typesStringFlag(restartTypes),
|
||||
cli.IntFlag{
|
||||
Name: "batch-size",
|
||||
Usage: "Number of containers to restart at a time",
|
||||
Value: 1,
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "interval",
|
||||
Usage: "Interval in millisecond to wait between restarts",
|
||||
Value: 1000,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func restartResources(ctx *cli.Context) error {
|
||||
return forEachResource(ctx, restartTypes, func(c *client.RancherClient, resource *client.Resource) (string, error) {
|
||||
action, err := pickAction(resource, "restart")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
//todo: revisit restart policy
|
||||
err = c.Action(resource.Type, action, resource, nil, resource)
|
||||
return resource.Id, err
|
||||
})
|
||||
}
|
||||
62
cmd/rm.go
62
cmd/rm.go
@@ -1,62 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// rmTypes lists the resource types `rancher rm` accepts.
var rmTypes = []string{"service", "container", "stack", "host", "volume", "secret"}
|
||||
|
||||
func RmCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "rm",
|
||||
Usage: "Delete " + strings.Join(rmTypes, ", "),
|
||||
Description: "\nDeletes resources by ID or name in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher rm 1s70\n\t$ rancher --env 1a5 rm stackName/serviceName \n",
|
||||
ArgsUsage: "[ID NAME...]",
|
||||
Action: deleteResources,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringSliceFlag{
|
||||
Name: "type",
|
||||
Usage: "Restrict delete to specific types",
|
||||
Value: &cli.StringSlice{},
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "stop,s",
|
||||
Usage: "Stop or deactivate resource first if needed before deleting",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func deleteResources(ctx *cli.Context) error {
|
||||
return forEachResource(ctx, rmTypes, func(c *client.RancherClient, resource *client.Resource) (string, error) {
|
||||
if ctx.Bool("stop") {
|
||||
action, err := pickAction(resource, "stop", "deactivate")
|
||||
if err == nil {
|
||||
w, err := NewWaiter(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := c.Action(resource.Type, action, resource, nil, resource); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := w.Add(resource.Id).Wait(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
err := c.Delete(resource)
|
||||
if v, ok := err.(*client.ApiError); ok && v.StatusCode == 405 {
|
||||
action, err := pickAction(resource, "stop", "deactivate")
|
||||
if err == nil {
|
||||
fmt.Printf("error: Must call %s on %s %s before removing\n", action, resource.Type, resource.Id)
|
||||
}
|
||||
}
|
||||
return resource.Id, err
|
||||
})
|
||||
}
|
||||
305
cmd/run.go
305
cmd/run.go
@@ -1,13 +1,6 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
@@ -28,7 +21,7 @@ import (
|
||||
--device-read-iops=[] Limit read rate (IO per second) from a device
|
||||
--device-write-bps=[] Limit write rate (bytes per second) to a device
|
||||
--device-write-iops=[] Limit write rate (IO per second) to a device
|
||||
--disable-content-trust=true Skip image verification
|
||||
--disable-content-trust=true Skip Image verification
|
||||
--dns-opt=[] Set DNS options
|
||||
-e, --env=[] Set environment variables
|
||||
--env-file=[] Read in a file of environment variables
|
||||
@@ -119,7 +112,7 @@ func RunCommand() cli.Command {
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "entrypoint",
|
||||
Usage: "Overwrite the default ENTRYPOINT of the image",
|
||||
Usage: "Overwrite the default ENTRYPOINT of the Image",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "expose",
|
||||
@@ -265,302 +258,12 @@ func RunCommand() cli.Command {
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "pull",
|
||||
Usage: "Always pull image on container start",
|
||||
Usage: "Always pull Image on container start",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func ParseName(c *client.RancherClient, name string) (*client.Stack, string, error) {
|
||||
stackName := ""
|
||||
serviceName := name
|
||||
|
||||
parts := strings.SplitN(name, "/", 2)
|
||||
if len(parts) == 2 {
|
||||
stackName = parts[0]
|
||||
serviceName = parts[1]
|
||||
}
|
||||
|
||||
stack, err := GetOrCreateDefaultStack(c, stackName)
|
||||
if err != nil {
|
||||
return stack, "", err
|
||||
}
|
||||
|
||||
if serviceName == "" {
|
||||
serviceName = RandomName()
|
||||
}
|
||||
|
||||
return stack, serviceName, nil
|
||||
}
|
||||
|
||||
func serviceRun(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if ctx.NArg() < 1 {
|
||||
return cli.NewExitError("Image name is required as the first argument", 1)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ctx.IsSet("scale") {
|
||||
launchConfig := &client.LaunchConfig{
|
||||
//BlkioDeviceOptions:
|
||||
BlkioWeight: ctx.Int64("blkio-weight"),
|
||||
CapAdd: ctx.StringSlice("cap-add"),
|
||||
CapDrop: ctx.StringSlice("cap-drop"),
|
||||
//CpuSet: ctx.String(""),
|
||||
CgroupParent: ctx.String("cgroup-parent"),
|
||||
CpuSetMems: ctx.String("cpuset-mems"),
|
||||
CpuPeriod: ctx.Int64("cpu-period"),
|
||||
CpuQuota: ctx.Int64("cpu-quota"),
|
||||
CpuShares: ctx.Int64("cpu-shares"),
|
||||
Devices: ctx.StringSlice("device"),
|
||||
Dns: ctx.StringSlice("dns"),
|
||||
DnsOpt: ctx.StringSlice("dns-opt"),
|
||||
DnsSearch: ctx.StringSlice("dns-search"),
|
||||
EntryPoint: ctx.StringSlice("entrypoint"),
|
||||
Expose: ctx.StringSlice("expose"),
|
||||
GroupAdd: ctx.StringSlice("group-add"),
|
||||
HealthCmd: ctx.StringSlice("health-cmd"),
|
||||
HealthTimeout: ctx.Int64("health-timeout"),
|
||||
HealthInterval: ctx.Int64("health-interval"),
|
||||
HealthRetries: ctx.Int64("health-retries"),
|
||||
Hostname: ctx.String("hostname"),
|
||||
Image: ctx.Args()[0],
|
||||
Ip: ctx.String("ip"),
|
||||
Ip6: ctx.String("ip6"),
|
||||
IpcMode: ctx.String("ipc"),
|
||||
Isolation: ctx.String("isolation"),
|
||||
KernelMemory: ctx.Int64("kernel-memory"),
|
||||
Labels: map[string]string{},
|
||||
Environment: map[string]string{},
|
||||
//LogConfig:
|
||||
Memory: ctx.Int64("memory"),
|
||||
MemoryReservation: ctx.Int64("memory-reservation"),
|
||||
MemorySwap: ctx.Int64("memory-swap"),
|
||||
MemorySwappiness: ctx.Int64("memory-swappiness"),
|
||||
//NetworkIds: ctx.StringSlice("networkids"),
|
||||
NetAlias: ctx.StringSlice("net-alias"),
|
||||
NetworkMode: ctx.String("net"),
|
||||
OomKillDisable: ctx.Bool("oom-kill-disable"),
|
||||
OomScoreAdj: ctx.Int64("oom-score-adj"),
|
||||
PidMode: ctx.String("pid"),
|
||||
PidsLimit: ctx.Int64("pids-limit"),
|
||||
Ports: ctx.StringSlice("publish"),
|
||||
Privileged: ctx.Bool("privileged"),
|
||||
PublishAllPorts: ctx.Bool("publish-all"),
|
||||
ReadOnly: ctx.Bool("read-only"),
|
||||
//todo: add RunInit
|
||||
//RunInit: ctx.Bool("init"),
|
||||
SecurityOpt: ctx.StringSlice("security-opt"),
|
||||
ShmSize: ctx.Int64("shm-size"),
|
||||
StdinOpen: ctx.Bool("interactive"),
|
||||
StopSignal: ctx.String("stop-signal"),
|
||||
Tty: ctx.Bool("tty"),
|
||||
User: ctx.String("user"),
|
||||
Uts: ctx.String("uts"),
|
||||
VolumeDriver: ctx.String("volume-driver"),
|
||||
WorkingDir: ctx.String("workdir"),
|
||||
DataVolumes: ctx.StringSlice("volume"),
|
||||
}
|
||||
|
||||
if ctx.String("log-driver") != "" || len(ctx.StringSlice("log-opt")) > 0 {
|
||||
launchConfig.LogConfig = &client.LogConfig{
|
||||
Driver: ctx.String("log-driver"),
|
||||
Config: map[string]string{},
|
||||
}
|
||||
for _, opt := range ctx.StringSlice("log-opt") {
|
||||
parts := strings.SplitN(opt, "=", 2)
|
||||
if len(parts) > 1 {
|
||||
launchConfig.LogConfig.Config[parts[0]] = parts[1]
|
||||
} else {
|
||||
launchConfig.LogConfig.Config[parts[0]] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, label := range ctx.StringSlice("label") {
|
||||
parts := strings.SplitN(label, "=", 2)
|
||||
value := ""
|
||||
if len(parts) > 1 {
|
||||
value = parts[1]
|
||||
}
|
||||
launchConfig.Labels[parts[0]] = value
|
||||
}
|
||||
|
||||
for _, env := range ctx.StringSlice("env") {
|
||||
parts := strings.SplitN(env, "=", 2)
|
||||
value := ""
|
||||
|
||||
if len(parts) > 1 {
|
||||
value = parts[1]
|
||||
|
||||
if parts[0] == "" {
|
||||
errMsg := fmt.Sprintf("invalid argument \"%s\" for e: invalid environment variable: %s\nSee 'rancher run --help'.", env, env)
|
||||
return cli.NewExitError(errMsg, 1)
|
||||
}
|
||||
} else if len(parts) == 1 {
|
||||
value = os.Getenv(parts[0])
|
||||
}
|
||||
launchConfig.Environment[parts[0]] = value
|
||||
}
|
||||
|
||||
if ctx.Bool("schedule-global") {
|
||||
launchConfig.Labels["io.rancher.scheduler.global"] = "true"
|
||||
}
|
||||
|
||||
if ctx.Bool("pull") {
|
||||
launchConfig.Labels["io.rancher.container.pull_image"] = "always"
|
||||
}
|
||||
|
||||
args := ctx.Args()[1:]
|
||||
|
||||
if len(args) > 0 {
|
||||
launchConfig.Command = args
|
||||
}
|
||||
|
||||
stack, name, err := ParseName(c, ctx.String("name"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
service := &client.Service{
|
||||
Name: name,
|
||||
StackId: stack.Id,
|
||||
LaunchConfig: launchConfig,
|
||||
Scale: int64(ctx.Int("scale")),
|
||||
}
|
||||
|
||||
service, err = c.Service.Create(service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return WaitFor(ctx, service.Id)
|
||||
}
|
||||
container := &client.Container{
|
||||
//BlkioDeviceOptions:
|
||||
BlkioWeight: ctx.Int64("blkio-weight"),
|
||||
CapAdd: ctx.StringSlice("cap-add"),
|
||||
CapDrop: ctx.StringSlice("cap-drop"),
|
||||
//CpuSet: ctx.String(""),
|
||||
CgroupParent: ctx.String("cgroup-parent"),
|
||||
CpuSetMems: ctx.String("cpuset-mems"),
|
||||
CpuPeriod: ctx.Int64("cpu-period"),
|
||||
CpuQuota: ctx.Int64("cpu-quota"),
|
||||
CpuShares: ctx.Int64("cpu-shares"),
|
||||
Devices: ctx.StringSlice("device"),
|
||||
Dns: ctx.StringSlice("dns"),
|
||||
DnsOpt: ctx.StringSlice("dns-opt"),
|
||||
DnsSearch: ctx.StringSlice("dns-search"),
|
||||
EntryPoint: ctx.StringSlice("entrypoint"),
|
||||
Expose: ctx.StringSlice("expose"),
|
||||
GroupAdd: ctx.StringSlice("group-add"),
|
||||
HealthCmd: ctx.StringSlice("health-cmd"),
|
||||
HealthTimeout: ctx.Int64("health-timeout"),
|
||||
HealthInterval: ctx.Int64("health-interval"),
|
||||
HealthRetries: ctx.Int64("health-retries"),
|
||||
Hostname: ctx.String("hostname"),
|
||||
Image: ctx.Args()[0],
|
||||
Ip: ctx.String("ip"),
|
||||
Ip6: ctx.String("ip6"),
|
||||
IpcMode: ctx.String("ipc"),
|
||||
Isolation: ctx.String("isolation"),
|
||||
KernelMemory: ctx.Int64("kernel-memory"),
|
||||
Labels: map[string]string{},
|
||||
Environment: map[string]string{},
|
||||
//LogConfig:
|
||||
Memory: ctx.Int64("memory"),
|
||||
MemoryReservation: ctx.Int64("memory-reservation"),
|
||||
MemorySwap: ctx.Int64("memory-swap"),
|
||||
MemorySwappiness: ctx.Int64("memory-swappiness"),
|
||||
//NetworkIds: ctx.StringSlice("networkids"),
|
||||
NetworkMode: ctx.String("net"),
|
||||
OomKillDisable: ctx.Bool("oom-kill-disable"),
|
||||
OomScoreAdj: ctx.Int64("oom-score-adj"),
|
||||
PidMode: ctx.String("pid"),
|
||||
PidsLimit: ctx.Int64("pids-limit"),
|
||||
Ports: ctx.StringSlice("publish"),
|
||||
Privileged: ctx.Bool("privileged"),
|
||||
PublishAllPorts: ctx.Bool("publish-all"),
|
||||
ReadOnly: ctx.Bool("read-only"),
|
||||
//todo: add RunInit
|
||||
//RunInit: ctx.Bool("init"),
|
||||
SecurityOpt: ctx.StringSlice("security-opt"),
|
||||
ShmSize: ctx.Int64("shm-size"),
|
||||
StdinOpen: ctx.Bool("interactive"),
|
||||
StopSignal: ctx.String("stop-signal"),
|
||||
Tty: ctx.Bool("tty"),
|
||||
User: ctx.String("user"),
|
||||
Uts: ctx.String("uts"),
|
||||
VolumeDriver: ctx.String("volume-driver"),
|
||||
WorkingDir: ctx.String("workdir"),
|
||||
DataVolumes: ctx.StringSlice("volume"),
|
||||
}
|
||||
if ctx.IsSet("it") {
|
||||
container.StdinOpen = true
|
||||
container.Tty = true
|
||||
}
|
||||
|
||||
if ctx.String("log-driver") != "" || len(ctx.StringSlice("log-opt")) > 0 {
|
||||
container.LogConfig = &client.LogConfig{
|
||||
Driver: ctx.String("log-driver"),
|
||||
Config: map[string]string{},
|
||||
}
|
||||
for _, opt := range ctx.StringSlice("log-opt") {
|
||||
parts := strings.SplitN(opt, "=", 2)
|
||||
if len(parts) > 1 {
|
||||
container.LogConfig.Config[parts[0]] = parts[1]
|
||||
} else {
|
||||
container.LogConfig.Config[parts[0]] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, label := range ctx.StringSlice("label") {
|
||||
parts := strings.SplitN(label, "=", 2)
|
||||
value := ""
|
||||
if len(parts) > 1 {
|
||||
value = parts[1]
|
||||
}
|
||||
container.Labels[parts[0]] = value
|
||||
}
|
||||
|
||||
for _, env := range ctx.StringSlice("env") {
|
||||
parts := strings.SplitN(env, "=", 2)
|
||||
value := ""
|
||||
|
||||
if len(parts) > 1 {
|
||||
value = parts[1]
|
||||
|
||||
if parts[0] == "" {
|
||||
errMsg := fmt.Sprintf("invalid argument \"%s\" for e: invalid environment variable: %s\nSee 'rancher run --help'.", env, env)
|
||||
return cli.NewExitError(errMsg, 1)
|
||||
}
|
||||
} else if len(parts) == 1 {
|
||||
value = os.Getenv(parts[0])
|
||||
}
|
||||
container.Environment[parts[0]] = value
|
||||
}
|
||||
|
||||
if ctx.Bool("pull") {
|
||||
container.Labels["io.rancher.container.pull_image"] = "always"
|
||||
}
|
||||
|
||||
args := ctx.Args()[1:]
|
||||
|
||||
if len(args) > 0 {
|
||||
container.Command = args
|
||||
}
|
||||
|
||||
_, name, err := ParseName(c, ctx.String("name"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
container.Name = name
|
||||
cont, err := c.Container.Create(container)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return WaitFor(ctx, cont.Id)
|
||||
return nil
|
||||
}
|
||||
|
||||
74
cmd/scale.go
74
cmd/scale.go
@@ -1,74 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func ScaleCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "scale",
|
||||
Usage: "Set number of containers to run for a service",
|
||||
Action: serviceScale,
|
||||
Description: "\nNumbers are specified in the form `service=num` as arguments.\n\nExample:\n\t$ rancher scale web=2 worker=3\n",
|
||||
ArgsUsage: "[SERVICE=NUM...]",
|
||||
}
|
||||
}
|
||||
|
||||
// scaleUp pairs a resolved service with the scale requested for it on the
// command line.
type scaleUp struct {
	name     string           // service name as given by the user; used as the waiter key
	resource *client.Resource // resolved "service" resource to update
	scale    int              // desired number of containers
}
|
||||
|
||||
func serviceScale(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
servicesToScale := []scaleUp{}
|
||||
for _, arg := range ctx.Args() {
|
||||
scale := 1
|
||||
parts := strings.SplitN(arg, "=", 2)
|
||||
if len(parts) > 1 {
|
||||
i, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid format for %s, expecting name=scale, example: web=2", arg)
|
||||
}
|
||||
scale = i
|
||||
}
|
||||
|
||||
resource, err := Lookup(c, parts[0], "service")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
servicesToScale = append(servicesToScale, scaleUp{
|
||||
name: parts[0],
|
||||
resource: resource,
|
||||
scale: scale,
|
||||
})
|
||||
}
|
||||
|
||||
w, err := NewWaiter(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, toScale := range servicesToScale {
|
||||
w.Add(toScale.name)
|
||||
|
||||
err := c.Update("service", toScale.resource, map[string]interface{}{
|
||||
"scale": toScale.scale,
|
||||
}, toScale.resource)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return w.Wait()
|
||||
}
|
||||
130
cmd/secret.go
130
cmd/secret.go
@@ -1,130 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func SecretCommand() cli.Command {
|
||||
secretLsFlags := []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "quiet,q",
|
||||
Usage: "Only display IDs",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "'json' or Custom format: {{.Id}} {{.Name}}",
|
||||
},
|
||||
}
|
||||
|
||||
return cli.Command{
|
||||
Name: "secrets",
|
||||
ShortName: "secret",
|
||||
Usage: "Operations on secrets",
|
||||
Action: defaultAction(secretLs),
|
||||
Flags: secretLsFlags,
|
||||
Subcommands: []cli.Command{
|
||||
cli.Command{
|
||||
Name: "ls",
|
||||
Usage: "List secrets",
|
||||
Description: "\nLists all secrets in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher secrets ls\n\t$ rancher --env 1a5 secrets ls\n",
|
||||
ArgsUsage: "None",
|
||||
Action: secretLs,
|
||||
Flags: secretLsFlags,
|
||||
},
|
||||
cli.Command{
|
||||
Name: "create",
|
||||
Usage: "Create a secret",
|
||||
Description: "\nCreate all secret in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher secret create my-name file-with-secret\n",
|
||||
ArgsUsage: "NAME [FILE|-]",
|
||||
Action: secretCreate,
|
||||
Flags: []cli.Flag{},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// SecretData is the row model consumed by the table writer in secretLs; the
// column mappings reference its fields by path (e.g. "Secret.Name").
type SecretData struct {
	ID     string        // secret ID shown in the ID column
	Secret client.Secret // full secret record backing the remaining columns
}
|
||||
|
||||
func secretLs(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
collection, err := c.Secret.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writer := NewTableWriter([][]string{
|
||||
{"ID", "ID"},
|
||||
{"NAME", "Secret.Name"},
|
||||
{"CREATED", "Secret.Created"},
|
||||
}, ctx)
|
||||
|
||||
defer writer.Close()
|
||||
|
||||
for _, item := range collection.Data {
|
||||
writer.Write(&SecretData{
|
||||
ID: item.Id,
|
||||
Secret: item,
|
||||
})
|
||||
}
|
||||
|
||||
return writer.Err()
|
||||
}
|
||||
|
||||
func secretCreate(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w, err := NewWaiter(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ctx.NArg() != 2 {
|
||||
return fmt.Errorf("both NAME and FILE|- are required")
|
||||
}
|
||||
|
||||
name, file := ctx.Args()[0], ctx.Args()[1]
|
||||
var input io.Reader
|
||||
|
||||
if file == "-" {
|
||||
input = os.Stdin
|
||||
} else {
|
||||
input, err = os.Open(file)
|
||||
if os.IsNotExist(err) {
|
||||
logrus.Errorf("Failed to find %s, argument must be a file or -", file)
|
||||
}
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadAll(input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
secret, err := c.Secret.Create(&client.Secret{
|
||||
Name: name,
|
||||
Value: base64.StdEncoding.EncodeToString(content),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.Add(secret.Id)
|
||||
return w.Wait()
|
||||
}
|
||||
186
cmd/ssh.go
186
cmd/ssh.go
@@ -1,186 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"archive/zip"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func SSHCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "ssh",
|
||||
Usage: "SSH into host",
|
||||
Description: "\nFor any hosts created through Rancher using docker-machine, you can SSH into the host. This is not supported for any custom hosts. If the host is not in the current $RANCHER_ENVIRONMENT, use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher ssh root@1h1\n\t$ rancher --env 1a5 ssh ubuntu@1h5\n",
|
||||
ArgsUsage: "[HOSTID HOSTNAME...]",
|
||||
Action: hostSSH,
|
||||
Flags: []cli.Flag{},
|
||||
SkipFlagParsing: true,
|
||||
}
|
||||
}
|
||||
|
||||
// hostSSH implements `rancher ssh`. It extracts the target hostname from the
// first non-flag argument (either "host" or "user@host"), resolves the host
// through the API, downloads its machine SSH private key, and execs ssh
// against the host's agent IP, forwarding all remaining arguments.
func hostSSH(ctx *cli.Context) error {
	c, err := GetClient(ctx)
	if err != nil {
		return err
	}

	config, err := lookupConfig(ctx)
	if err != nil {
		return err
	}

	hostname := ""
	args := ctx.Args()

	// SkipFlagParsing is set on the command, so -h/--help must be handled
	// manually here.
	if len(args) > 0 && (args[0] == "-h" || args[0] == "--help") {
		return cli.ShowCommandHelp(ctx, "ssh")
	}

	// The first argument that does not look like a flag names the host,
	// with an optional "user@" prefix stripped off.
	for _, arg := range args {
		if len(arg) > 0 && arg[0] != '-' {
			parts := strings.SplitN(arg, "@", 2)
			hostname = parts[len(parts)-1]
			break
		}
	}

	if hostname == "" {
		return fmt.Errorf("Failed to find hostname in %v", args)
	}

	hostResource, err := Lookup(c, hostname, "host")
	if err != nil {
		return err
	}

	host, err := c.Host.ById(hostResource.Id)
	if err != nil {
		return err
	}

	// NOTE(review): despite its name, getDefaultSSHKey returns the machine
	// driver's default SSH *user*, not a key.
	user := getDefaultSSHKey(*host)

	key, err := getSSHKey(hostname, *host, config.AccessKey, config.SecretKey)
	if err != nil {
		return err
	}

	if host.AgentIpAddress == "" {
		return fmt.Errorf("Failed to find IP for %s", hostname)
	}

	// callSSH rewrites the host argument inside ctx.Args() in place so ssh
	// targets the agent IP; processExitCode propagates ssh's exit status.
	return processExitCode(callSSH(key, host.AgentIpAddress, ctx.Args(), user))
}
|
||||
|
||||
func callSSH(content []byte, ip string, args []string, user string) error {
|
||||
for i, val := range args {
|
||||
if !strings.HasPrefix(val, "-") && len(val) > 0 {
|
||||
parts := strings.SplitN(val, "@", 2)
|
||||
if len(parts) == 2 {
|
||||
parts[len(parts)-1] = ip
|
||||
args[i] = strings.Join(parts, "@")
|
||||
} else if len(parts) == 1 {
|
||||
if user == "" {
|
||||
return errors.New("Need to provide a ssh username")
|
||||
}
|
||||
args[i] = fmt.Sprintf("%s@%s", user, ip)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
tmpfile, err := ioutil.TempFile("", "ssh")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
if err := os.Chmod(tmpfile.Name(), 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = tmpfile.Write(content)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := exec.Command("ssh", append([]string{"-i", tmpfile.Name()}, args...)...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func getSSHKey(hostname string, host client.Host, accessKey, secretKey string) ([]byte, error) {
|
||||
link, ok := host.Links["config"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Failed to find SSH key for %s", hostname)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", link, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SetBasicAuth(accessKey, secretKey)
|
||||
req.Header.Add("Accept-Encoding", "zip")
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
tarGz, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("%s", tarGz)
|
||||
}
|
||||
|
||||
zipReader, err := zip.NewReader(bytes.NewReader(tarGz), resp.ContentLength)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, file := range zipReader.File {
|
||||
if path.Base(file.Name) == "id_rsa" {
|
||||
r, err := file.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer r.Close()
|
||||
return ioutil.ReadAll(r)
|
||||
}
|
||||
}
|
||||
return nil, errors.New("can't find private key file")
|
||||
}
|
||||
|
||||
func getDefaultSSHKey(host client.Host) string {
|
||||
if host.Amazonec2Config != nil {
|
||||
return host.Amazonec2Config.SshUser
|
||||
}
|
||||
if host.DigitaloceanConfig != nil {
|
||||
return host.DigitaloceanConfig.SshUser
|
||||
}
|
||||
if host.Amazonec2Config != nil {
|
||||
return host.Amazonec2Config.SshUser
|
||||
}
|
||||
return ""
|
||||
}
|
||||
189
cmd/stack.go
189
cmd/stack.go
@@ -1,189 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/rancher/rancher-compose-executor/lookup"
|
||||
"github.com/urfave/cli"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
func StackCommand() cli.Command {
|
||||
stackLsFlags := []cli.Flag{
|
||||
listSystemFlag(),
|
||||
cli.BoolFlag{
|
||||
Name: "quiet,q",
|
||||
Usage: "Only display IDs",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "'json' or Custom format: '{{.ID}} {{.Stack.Name}}'",
|
||||
},
|
||||
}
|
||||
|
||||
return cli.Command{
|
||||
Name: "stacks",
|
||||
ShortName: "stack",
|
||||
Usage: "Operations on stacks",
|
||||
Action: defaultAction(stackLs),
|
||||
Flags: stackLsFlags,
|
||||
Subcommands: []cli.Command{
|
||||
cli.Command{
|
||||
Name: "ls",
|
||||
Usage: "List stacks",
|
||||
Description: "\nLists all stacks in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher stacks ls\n\t$ rancher --env 1a5 stacks ls\n",
|
||||
ArgsUsage: "None",
|
||||
Action: stackLs,
|
||||
Flags: stackLsFlags,
|
||||
},
|
||||
cli.Command{
|
||||
Name: "create",
|
||||
Usage: "Create a stacks",
|
||||
Description: "\nCreate all stack in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher stacks create\n\t$ rancher --env 1a5 stacks ls\n",
|
||||
ArgsUsage: "None",
|
||||
Action: stackCreate,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolTFlag{
|
||||
Name: "start",
|
||||
Usage: "Start stack on create",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "system,s",
|
||||
Usage: "Create a system stack",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "empty,e",
|
||||
Usage: "Create an empty stack",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "quiet,q",
|
||||
Usage: "Only display IDs",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "docker-compose,f",
|
||||
Usage: "Docker Compose file",
|
||||
Value: "docker-compose.yml",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "rancher-compose,r",
|
||||
Usage: "Rancher Compose file",
|
||||
Value: "rancher-compose.yml",
|
||||
},
|
||||
//cli.StringFlag{
|
||||
// Name: "answers,a",
|
||||
// Usage: "Answers files",
|
||||
// Value: "answers",
|
||||
//},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// StackData is the row model consumed by the table writer in stackLs; the
// column mappings reference its fields by path (e.g. "Stack.Name").
type StackData struct {
	ID           string       // stack ID
	Catalog      string       // catalog external ID, when the stack came from a catalog
	Stack        client.Stack // full stack record backing the remaining columns
	State        string       // combined health/lifecycle state (see stackLs)
	ServiceCount int          // number of services in the stack
}
|
||||
|
||||
func stackLs(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
collection, err := c.Stack.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writer := NewTableWriter([][]string{
|
||||
{"ID", "ID"},
|
||||
{"NAME", "Stack.Name"},
|
||||
{"STATE", "State"},
|
||||
{"CATALOG", "Catalog"},
|
||||
{"SERVICES", "ServiceCount"},
|
||||
{"DETAIL", "Stack.TransitioningMessage"},
|
||||
}, ctx)
|
||||
|
||||
defer writer.Close()
|
||||
|
||||
for _, item := range collection.Data {
|
||||
combined := item.HealthState
|
||||
if item.State != "active" || combined == "" {
|
||||
combined = item.State
|
||||
}
|
||||
writer.Write(&StackData{
|
||||
ID: item.Id,
|
||||
Stack: item,
|
||||
State: combined,
|
||||
Catalog: item.ExternalId,
|
||||
ServiceCount: len(item.ServiceIds),
|
||||
})
|
||||
}
|
||||
|
||||
return writer.Err()
|
||||
}
|
||||
|
||||
// getFile returns the contents of the named file. An empty name or a missing
// file yields ("", nil); any other read error is returned.
func getFile(name string) (string, error) {
	if name == "" {
		return "", nil
	}
	data, err := ioutil.ReadFile(name)
	if os.IsNotExist(err) {
		// Missing files are treated the same as no file given.
		return "", nil
	}
	return string(data), err
}
|
||||
|
||||
func parseAnswers(ctx *cli.Context) (map[string]interface{}, error) {
|
||||
file := ctx.String("answers")
|
||||
|
||||
return lookup.ParseEnvFile(file)
|
||||
}
|
||||
|
||||
func stackCreate(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
|
||||
names := []string{RandomName()}
|
||||
if len(ctx.Args()) > 0 {
|
||||
names = ctx.Args()
|
||||
}
|
||||
|
||||
w, err := NewWaiter(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
for _, name := range names {
|
||||
stack := &client.Stack{
|
||||
Name: name,
|
||||
}
|
||||
|
||||
if !ctx.Bool("empty") {
|
||||
//var err error
|
||||
// todo: revisit
|
||||
//stack.Answers, err = parseAnswers(ctx)
|
||||
//if err != nil {
|
||||
//return errors.Wrap(err, "reading answers")
|
||||
//}
|
||||
}
|
||||
|
||||
stack, err = c.Stack.Create(stack)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
|
||||
w.Add(stack.Id)
|
||||
}
|
||||
|
||||
if lastErr != nil {
|
||||
return lastErr
|
||||
}
|
||||
|
||||
return w.Wait()
|
||||
}
|
||||
37
cmd/start.go
37
cmd/start.go
@@ -1,37 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
	// startTypes lists the resource types `rancher start` can operate on.
	startTypes = []string{"service", "container", "host", "stack"}
)
|
||||
|
||||
func StartCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "start",
|
||||
ShortName: "activate",
|
||||
Usage: "Start or activate " + strings.Join(startTypes, ", "),
|
||||
Description: "\nStart resources by ID or name in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher start 1s70\n\t$ rancher --env 1a5 start stackName/serviceName \n",
|
||||
ArgsUsage: "[ID NAME...]",
|
||||
Action: startResources,
|
||||
Flags: []cli.Flag{
|
||||
typesStringFlag(startTypes),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func startResources(ctx *cli.Context) error {
|
||||
return forEachResource(ctx, startTypes, func(c *client.RancherClient, resource *client.Resource) (string, error) {
|
||||
action, err := pickAction(resource, "start", "activate", "activateservices")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
err = c.Action(resource.Type, action, resource, nil, resource)
|
||||
return resource.Id, err
|
||||
})
|
||||
}
|
||||
36
cmd/stop.go
36
cmd/stop.go
@@ -1,36 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
	// stopTypes lists the resource types `rancher stop` can operate on.
	stopTypes = []string{"service", "container", "host", "stack"}
)
|
||||
|
||||
func StopCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "stop",
|
||||
ShortName: "deactivate",
|
||||
Usage: "Stop or deactivate " + strings.Join(stopTypes, ", "),
|
||||
Description: "\nStop resources by ID or name in the current $RANCHER_ENVIRONMENT. Use `--env <envID>` or `--env <envName>` to select a different environment.\n\nExample:\n\t$ rancher stop 1s70\n\t$ rancher --env 1a5 stop stackName/serviceName \n",
|
||||
ArgsUsage: "[ID NAME...]",
|
||||
Action: stopResources,
|
||||
Flags: []cli.Flag{
|
||||
typesStringFlag(stopTypes),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func stopResources(ctx *cli.Context) error {
|
||||
return forEachResource(ctx, stopTypes, func(c *client.RancherClient, resource *client.Resource) (string, error) {
|
||||
action, err := pickAction(resource, "stop", "deactivate", "deactivateservices")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return resource.Id, c.Action(resource.Type, action, resource, nil, resource)
|
||||
})
|
||||
}
|
||||
27
cmd/type.go
27
cmd/type.go
@@ -1,27 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func typesStringFlag(def []string) cli.StringSliceFlag {
|
||||
usage := "Restrict restart to specific types"
|
||||
if len(def) > 0 {
|
||||
usage = fmt.Sprintf("%s (%s)", usage, strings.Join(def, ", "))
|
||||
}
|
||||
return cli.StringSliceFlag{
|
||||
Name: "type",
|
||||
Usage: usage,
|
||||
}
|
||||
}
|
||||
|
||||
func getTypesStringFlag(ctx *cli.Context, def []string) []string {
|
||||
val := ctx.StringSlice("type")
|
||||
if len(val) > 0 {
|
||||
return val
|
||||
}
|
||||
return def
|
||||
}
|
||||
373
cmd/up.go
373
cmd/up.go
@@ -1,373 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"io"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types"
|
||||
dclient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/fatih/color"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rancher/cli/monitor"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/rancher/rancher-docker-api-proxy"
|
||||
"github.com/urfave/cli"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// colors is a palette of terminal color attributes — presumably cycled to tint
// per-container log output; confirm against the log-streaming code below.
var colors = []color.Attribute{color.FgGreen, color.FgBlue, color.FgCyan, color.FgMagenta, color.FgRed, color.FgWhite, color.FgYellow}
|
||||
|
||||
func UpCommand() cli.Command {
|
||||
return cli.Command{
|
||||
Name: "up",
|
||||
Usage: "Bring all services up",
|
||||
Action: rancherUp,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "d",
|
||||
Usage: "Do not block and log",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "rollback, r",
|
||||
Usage: "Rollback to the previous deployed version",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "file,f",
|
||||
Usage: "Specify one or more alternate compose files (default: compose.yml)",
|
||||
Value: &cli.StringSlice{},
|
||||
EnvVar: "COMPOSE_FILE",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "stack,s",
|
||||
Usage: "Specify an alternate project name (default: directory name)",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "prune",
|
||||
Usage: "Prune services that doesn't exist on the current compose files",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// rancherUp implements `rancher up`: it resolves the compose file(s) and stack
// name, then either rolls the existing stack back (--rollback) or deploys via
// doUp. Unless -d is given it then blocks, watching stack progress and
// streaming container logs; with -d it just prints the stack ID.
func rancherUp(ctx *cli.Context) error {
	rancherClient, err := GetClient(ctx)
	if err != nil {
		return err
	}

	// only look for --file or ./compose.yml
	composes, err := resolveComposeFile(ctx)
	if err != nil {
		return errors.Wrap(err, "Failed to resolve compose file")
	}

	//resolve the stackName from --stack or current dir name
	stackName, err := resolveStackName(ctx)
	if err != nil {
		return errors.Wrap(err, "Failed to resolve stackName")
	}

	// get existing stack by stack name
	stacks, err := rancherClient.Stack.List(&client.ListOpts{
		Filters: map[string]interface{}{
			"name":         stackName,
			"removed_null": nil,
		},
	})
	if err != nil {
		return errors.Wrap(err, "failed to list stacks")
	}

	// rollback: requires the stack to already exist; no deploy is performed.
	if ctx.Bool("rollback") {
		if len(stacks.Data) == 0 {
			return errors.Errorf("Can't find stack %v", stackName)
		}
		_, err := rancherClient.Stack.ActionRollback(&stacks.Data[0])
		if err != nil {
			return errors.Errorf("failed to rollback stack %v", stackName)
		}
		return nil
	}

	stackID, err := doUp(stacks, stackName, composes, ctx, rancherClient)
	if err != nil {
		return err
	}

	if !ctx.Bool("d") {
		// Foreground mode: one goroutine watches the stack's progress, another
		// streams container logs; the select loop below multiplexes the two.
		watcher := monitor.NewUpWatcher(rancherClient)
		watcher.Subscribe()
		watchErr := make(chan error)
		logErr := make(chan error)
		go func(err chan error) { err <- watcher.Start(stackID) }(watchErr)
		services, err := watchServiceIds(stackID, rancherClient)
		if err != nil {
			return err
		}
		logrus.Infof("Stack %s is up", stackName)
		go func(err chan error) { err <- watchLogs(rancherClient, stackID, services) }(logErr)
		for {
			select {
			case err := <-watchErr:
				// The stack watcher ending (for any reason) terminates the command.
				return errors.Errorf("Rancher up failed. Exiting Error: %v", err)
			case err := <-logErr:
				// NOTE(review): after watchLogs returns, nothing restarts it —
				// this branch only sleeps, so log streaming does not actually
				// resume despite the "retry" message. Confirm intended behavior.
				if err != nil {
					logrus.Warnf("Failed to watch container logs. Sleep 1 seconds and retry")
				}
				time.Sleep(time.Second * 1)
			}
		}
	}
	fmt.Println(stackID)
	return nil
}
|
||||
|
||||
// toUnixPath converts Windows-style backslash separators to forward slashes.
func toUnixPath(p string) string {
	return strings.Join(strings.Split(p, "\\"), "/")
}
|
||||
|
||||
func getStack(c *client.RancherClient, stackID string) (*client.Stack, error) {
|
||||
return c.Stack.ById(stackID)
|
||||
}
|
||||
|
||||
func getServices(c *client.RancherClient, serviceIds []string) ([]client.Service, error) {
|
||||
services := []client.Service{}
|
||||
for _, serviceID := range serviceIds {
|
||||
service, err := c.Service.ById(serviceID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get service (id: [%v])", serviceID)
|
||||
}
|
||||
services = append(services, *service)
|
||||
}
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func getLogs(c *client.RancherClient, instanceIds map[string]struct{}) error {
|
||||
wg := sync.WaitGroup{}
|
||||
instances := []client.Instance{}
|
||||
for instanceID := range instanceIds {
|
||||
instance, err := c.Instance.ById(instanceID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get instance id [%v]", instanceID)
|
||||
}
|
||||
instances = append(instances, *instance)
|
||||
}
|
||||
listenSocks := map[string]*dclient.Client{}
|
||||
for _, i := range instances {
|
||||
if i.ExternalId == "" || i.HostId == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if dockerClient, ok := listenSocks[i.HostId]; ok {
|
||||
wg.Add(1)
|
||||
go func(dockerClient *dclient.Client, i client.Instance) {
|
||||
log(i, dockerClient)
|
||||
wg.Done()
|
||||
}(dockerClient, i)
|
||||
continue
|
||||
}
|
||||
|
||||
resource, err := Lookup(c, i.HostId, "host")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host, err := c.Host.ById(resource.Id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state := getHostState(host)
|
||||
if state != "active" && state != "inactive" {
|
||||
logrus.Errorf("Can not contact host %s in state %s", i.HostId, state)
|
||||
continue
|
||||
}
|
||||
|
||||
tempfile, err := ioutil.TempFile("", "docker-sock")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tempfile.Name())
|
||||
|
||||
if err := tempfile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dockerHost := "unix://" + tempfile.Name()
|
||||
proxy := dockerapiproxy.NewProxy(c, host.Id, dockerHost)
|
||||
if err := proxy.Listen(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
logrus.Fatal(proxy.Serve())
|
||||
}()
|
||||
|
||||
dockerClient, err := dclient.NewClient(dockerHost, "", nil, nil)
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to connect to host %s: %v", i.HostId, err)
|
||||
continue
|
||||
}
|
||||
|
||||
listenSocks[i.HostId] = dockerClient
|
||||
|
||||
wg.Add(1)
|
||||
go func(dockerClient *dclient.Client, i client.Instance) {
|
||||
log(i, dockerClient)
|
||||
wg.Done()
|
||||
}(dockerClient, i)
|
||||
}
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func log(instance client.Instance, dockerClient *dclient.Client) error {
|
||||
c, err := dockerClient.ContainerInspect(context.Background(), instance.ExternalId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := types.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Follow: true,
|
||||
Tail: "10",
|
||||
}
|
||||
responseBody, err := dockerClient.ContainerLogs(context.Background(), c.ID, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer responseBody.Close()
|
||||
|
||||
if c.Config.Tty {
|
||||
_, err = io.Copy(os.Stdout, responseBody)
|
||||
} else {
|
||||
l := loggerFactory.CreateContainerLogger(instance.Name)
|
||||
_, err = stdcopy.StdCopy(writerFunc(l.Out), writerFunc(l.Err), responseBody)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func resolveComposeFile(ctx *cli.Context) (map[string]string, error) {
|
||||
composeFiles := ctx.StringSlice("file")
|
||||
if len(composeFiles) == 0 {
|
||||
composeFiles = []string{"compose.yml"}
|
||||
}
|
||||
composes := map[string]string{}
|
||||
for _, composeFile := range composeFiles {
|
||||
fp, err := filepath.Abs(composeFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to lookup current directory name")
|
||||
}
|
||||
file, err := os.Open(fp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Can not find compose.yml")
|
||||
}
|
||||
defer file.Close()
|
||||
buf, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to read file")
|
||||
}
|
||||
composes[composeFile] = string(buf)
|
||||
}
|
||||
return composes, nil
|
||||
}
|
||||
|
||||
func resolveStackName(ctx *cli.Context) (string, error) {
|
||||
if ctx.String("stack") != "" {
|
||||
return ctx.String("stack"), nil
|
||||
}
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "Failed to get current working dir for stackName")
|
||||
}
|
||||
return path.Base(toUnixPath(wd)), nil
|
||||
}
|
||||
|
||||
func doUp(stacks *client.StackCollection, stackName string, composes map[string]string, ctx *cli.Context, rancherClient *client.RancherClient) (string, error) {
|
||||
if len(stacks.Data) > 0 {
|
||||
// update stacks
|
||||
stacks.Data[0].Templates = composes
|
||||
prune := ctx.Bool("prune")
|
||||
if !ctx.Bool("d") {
|
||||
logrus.Infof("Updating stack %v", stackName)
|
||||
}
|
||||
_, err := rancherClient.Stack.Update(&stacks.Data[0], client.Stack{
|
||||
Templates: composes,
|
||||
Prune: prune,
|
||||
})
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to update stack %v", stackName)
|
||||
}
|
||||
return stacks.Data[0].Id, nil
|
||||
}
|
||||
// create new stack
|
||||
if !ctx.Bool("d") {
|
||||
logrus.Infof("Creating Stack %s", stackName)
|
||||
}
|
||||
prune := ctx.Bool("prune")
|
||||
stack, err := rancherClient.Stack.Create(&client.Stack{
|
||||
Name: stackName,
|
||||
Templates: composes,
|
||||
Prune: prune,
|
||||
})
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create stack %v", stackName)
|
||||
}
|
||||
return stack.Id, nil
|
||||
}
|
||||
|
||||
func watchLogs(rancherClient *client.RancherClient, stackID string, services []client.Service) error {
|
||||
instanceIds := map[string]struct{}{}
|
||||
for _, service := range services {
|
||||
for _, instanceID := range service.InstanceIds {
|
||||
instanceIds[instanceID] = struct{}{}
|
||||
}
|
||||
}
|
||||
if err := getLogs(rancherClient, instanceIds); err != nil {
|
||||
return errors.Wrapf(err, "failed to get container logs")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func watchServiceIds(stackID string, rancherClient *client.RancherClient) ([]client.Service, error) {
|
||||
for {
|
||||
stack, err := getStack(rancherClient, stackID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if stack.Transitioning == "error" {
|
||||
return nil, errors.Errorf("Failed to up stack %s. Error: %s", stack.Name, stack.TransitioningMessage)
|
||||
}
|
||||
if len(stack.ServiceIds) != 0 {
|
||||
services, err := getServices(rancherClient, stack.ServiceIds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
isUp := true
|
||||
for _, service := range services {
|
||||
if service.Transitioning != "no" {
|
||||
isUp = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if isUp {
|
||||
return services, nil
|
||||
}
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func printErr(id string, oldErr, newErr error) error {
|
||||
if newErr != nil {
|
||||
fmt.Printf("error %s: %s\n", id, newErr.Error())
|
||||
return newErr
|
||||
}
|
||||
return oldErr
|
||||
}
|
||||
|
||||
// forEachResourceWithClient looks up each CLI argument as a resource of one
// of the given types and applies fn to it. Per-resource failures are
// printed and remembered rather than aborting the loop; if any occurred,
// the command exits with status 1, otherwise the waiter blocks until all
// collected resource IDs reach their resting state.
func forEachResourceWithClient(c *client.RancherClient, ctx *cli.Context, types []string, fn func(c *client.RancherClient, resource *client.Resource) (string, error)) error {
	types = getTypesStringFlag(ctx, types)
	w, err := NewWaiter(ctx)
	if err != nil {
		return err
	}

	var lastErr error
	for _, id := range ctx.Args() {
		resource, err := Lookup(c, id, types...)
		if err != nil {
			lastErr = printErr(id, lastErr, err)
			continue
		}

		resourceID, err := fn(c, resource)
		// An empty return ID means "wait on the looked-up resource itself".
		if resourceID == "" {
			resourceID = resource.Id
		}
		lastErr = printErr(resource.Id, lastErr, err)
		// "-" appears to be a sentinel meaning "do not wait on anything" —
		// confirm against the fn implementations passed by callers.
		// NOTE(review): the resource is still added to the waiter even when
		// fn returned an error; confirm that is intended.
		if resourceID != "" && resourceID != "-" {
			w.Add(resourceID)
		}
	}

	if lastErr != nil {
		return cli.NewExitError("", 1)
	}

	return w.Wait()
}
|
||||
|
||||
func forEachResource(ctx *cli.Context, types []string, fn func(c *client.RancherClient, resource *client.Resource) (string, error)) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return forEachResourceWithClient(c, ctx, types, fn)
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/rancher/norman/types"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
@@ -19,8 +19,8 @@ func listSystemFlag() cli.BoolFlag {
|
||||
}
|
||||
}
|
||||
|
||||
func baseListOpts() *client.ListOpts {
|
||||
return &client.ListOpts{
|
||||
func baseListOpts() *types.ListOpts {
|
||||
return &types.ListOpts{
|
||||
Filters: map[string]interface{}{
|
||||
"limit": -2,
|
||||
"all": true,
|
||||
@@ -28,7 +28,7 @@ func baseListOpts() *client.ListOpts {
|
||||
}
|
||||
}
|
||||
|
||||
func defaultListOpts(ctx *cli.Context) *client.ListOpts {
|
||||
func defaultListOpts(ctx *cli.Context) *types.ListOpts {
|
||||
listOpts := baseListOpts()
|
||||
if ctx != nil && !ctx.Bool("all") {
|
||||
listOpts.Filters["removed_null"] = "1"
|
||||
|
||||
157
cmd/volume.go
157
cmd/volume.go
@@ -1,157 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// VolumeCommand wires up the "volumes" (alias "volume") CLI command with
// its ls/rm/create subcommands. Running the bare command defaults to "ls".
func VolumeCommand() cli.Command {
	// Flags shared by the default action and the explicit "ls" subcommand.
	volumeLsFlags := []cli.Flag{
		listAllFlag(),
		cli.BoolFlag{
			Name:  "quiet,q",
			Usage: "Only display IDs",
		},
		cli.StringFlag{
			Name:  "format",
			Usage: "'json' or Custom format: '{{.ID}} {{.Volume.Name}}'",
		},
	}

	return cli.Command{
		Name:      "volumes",
		ShortName: "volume",
		Usage:     "Operations on volumes",
		Action:    defaultAction(volumeLs),
		Flags:     volumeLsFlags,
		Subcommands: []cli.Command{
			cli.Command{
				Name:   "ls",
				Usage:  "List volumes",
				Action: volumeLs,
				Flags:  volumeLsFlags,
			},
			cli.Command{
				Name:   "rm",
				Usage:  "Delete volume",
				Action: volumeRm,
			},
			cli.Command{
				Name:   "create",
				Usage:  "Create volume",
				Action: volumeCreate,
				Flags: []cli.Flag{
					cli.StringFlag{
						Name:  "driver",
						Usage: "Specify volume driver name",
					},
					cli.StringSliceFlag{
						Name:  "opt",
						Usage: "Set driver specific key/value options",
					},
				},
			},
		},
	}
}
|
||||
|
||||
// VolumeData is the row type rendered by the volume table writer. The ID is
// surfaced as a top-level field so output formats like '{{.ID}}' work.
type VolumeData struct {
	ID     string
	Volume client.Volume
}
|
||||
|
||||
func volumeLs(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
collection, err := c.Volume.List(defaultListOpts(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writer := NewTableWriter([][]string{
|
||||
{"ID", "ID"},
|
||||
{"NAME", "Volume.Name"},
|
||||
{"STATE", "Volume.State"},
|
||||
{"DRIVER", "Volume.Driver"},
|
||||
{"DETAIL", "Volume.TransitioningMessage"},
|
||||
}, ctx)
|
||||
|
||||
defer writer.Close()
|
||||
|
||||
for _, item := range collection.Data {
|
||||
writer.Write(&VolumeData{
|
||||
ID: item.Id,
|
||||
Volume: item,
|
||||
})
|
||||
}
|
||||
|
||||
return writer.Err()
|
||||
}
|
||||
|
||||
func volumeRm(ctx *cli.Context) error {
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
for _, id := range ctx.Args() {
|
||||
volume, err := Lookup(c, id, "volume")
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
logrus.Errorf("Failed to delete %s: %v", id, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := c.Delete(volume); err != nil {
|
||||
lastErr = err
|
||||
logrus.Errorf("Failed to delete %s: %v", id, err)
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Println(volume.Id)
|
||||
}
|
||||
|
||||
return lastErr
|
||||
}
|
||||
|
||||
func volumeCreate(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 1 {
|
||||
return cli.NewExitError("Volume name is required as the first argument", 1)
|
||||
}
|
||||
|
||||
c, err := GetClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newVol := &client.Volume{
|
||||
Name: ctx.Args()[0],
|
||||
Driver: ctx.String("driver"),
|
||||
DriverOpts: map[string]string{},
|
||||
}
|
||||
|
||||
for _, arg := range ctx.StringSlice("opt") {
|
||||
parts := strings.SplitN(arg, "=", 2)
|
||||
if len(parts) == 1 {
|
||||
newVol.DriverOpts[parts[0]] = ""
|
||||
} else {
|
||||
newVol.DriverOpts[parts[0]] = parts[1]
|
||||
}
|
||||
}
|
||||
|
||||
newVol, err = c.Volume.Create(newVol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(newVol.Id)
|
||||
return nil
|
||||
}
|
||||
44
cmd/wait.go
44
cmd/wait.go
@@ -5,10 +5,11 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/rancher/cli/cliclient"
|
||||
"github.com/rancher/cli/monitor"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
|
||||
"github.com/rancher/rancher-compose-executor/project/options"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
@@ -74,7 +75,7 @@ type Waiter struct {
|
||||
timeout int
|
||||
state string
|
||||
resources []string
|
||||
client *client.RancherClient
|
||||
client *cliclient.MasterClient
|
||||
monitor *monitor.Monitor
|
||||
}
|
||||
|
||||
@@ -113,9 +114,10 @@ func (w *Waiter) done(resourceType, id string) (bool, error) {
|
||||
return w.checkDone(resourceType, id, data)
|
||||
}
|
||||
|
||||
if err := w.client.ById(resourceType, id, &data); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// FIXME Add this back?
|
||||
//if err := w.client.ById(resourceType, id, &data); err != nil {
|
||||
// return false, err
|
||||
//}
|
||||
|
||||
return w.checkDone(resourceType, id, data)
|
||||
}
|
||||
@@ -147,22 +149,22 @@ func (w *Waiter) Wait() error {
|
||||
watching := map[ResourceID]bool{}
|
||||
w.monitor = monitor.New(w.client)
|
||||
sub := w.monitor.Subscribe()
|
||||
go func() { logrus.Fatal(w.monitor.Start()) }()
|
||||
//go func() { logrus.Fatal(w.monitor.Start()) }()
|
||||
|
||||
for _, resource := range w.resources {
|
||||
r, err := Lookup(w.client, resource, waitTypes...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ok, err := w.done(r.Type, r.Id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
watching[NewResourceID(r.Type, r.Id)] = true
|
||||
}
|
||||
}
|
||||
//for _, resource := range w.resources {
|
||||
// r, err := Lookup(w.client, resource, waitTypes...)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// ok, err := w.done(r.Type, r.ID)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if !ok {
|
||||
// watching[NewResourceID(r.Type, r.ID)] = true
|
||||
// }
|
||||
//}
|
||||
|
||||
timeout := time.After(time.Duration(w.timeout) * time.Second)
|
||||
every := time.Tick(10 * time.Second)
|
||||
|
||||
76
config/config.go
Normal file
76
config/config.go
Normal file
@@ -0,0 +1,76 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Config holds the main config for the user
|
||||
type Config struct {
|
||||
Servers map[string]*ServerConfig
|
||||
//Path to the config file
|
||||
Path string `json:"path,omitempty"`
|
||||
// CurrentServer the user has in focus
|
||||
CurrentServer string
|
||||
}
|
||||
|
||||
//ServerConfig holds the config for each server the user has setup
|
||||
type ServerConfig struct {
|
||||
AccessKey string `json:"accessKey"`
|
||||
SecretKey string `json:"secretKey"`
|
||||
TokenKey string `json:"tokenKey"`
|
||||
URL string `json:"url"`
|
||||
Project string `json:"project"`
|
||||
CACerts string `json:"cacert"`
|
||||
}
|
||||
|
||||
// Write persists the config as JSON to c.Path, creating the parent
// directory (mode 0700) if needed. Note the value receiver: the mutation
// of c.Path below only affects the serialized copy, not the caller's
// Config.
func (c Config) Write() error {
	err := os.MkdirAll(path.Dir(c.Path), 0700)
	if err != nil {
		return err
	}

	logrus.Infof("Saving config to %s", c.Path)
	// Remember the destination, then blank Path so that, combined with its
	// omitempty tag, the on-disk JSON does not embed its own location.
	p := c.Path
	c.Path = ""
	output, err := os.Create(p)
	if err != nil {
		return err
	}
	defer output.Close()

	return json.NewEncoder(output).Encode(c)
}
|
||||
|
||||
// FocusedServer returns the configuration of the server currently in
// focus, or nil when CurrentServer names no known server (map lookup of a
// missing key yields the nil zero value).
func (c Config) FocusedServer() *ServerConfig {
	return c.Servers[c.CurrentServer]
}
|
||||
|
||||
func (c ServerConfig) EnvironmentURL() (string, error) {
|
||||
url, err := baseURL(c.URL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return url, nil
|
||||
}
|
||||
|
||||
// baseURL strips everything from the last "/v3" occurrence onward; when no
// such suffix exists it parses the URL and reduces it to scheme://host.
func baseURL(fullURL string) (string, error) {
	if idx := strings.LastIndex(fullURL, "/v3"); idx != -1 {
		return fullURL[:idx], nil
	}
	parsed, err := url.Parse(fullURL)
	if err != nil {
		return "", err
	}
	trimmed := url.URL{
		Scheme: parsed.Scheme,
		Host:   parsed.Host,
	}
	return trimmed.String(), nil
}
|
||||
94
main.go
94
main.go
@@ -6,10 +6,10 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rancher/cli/cmd"
|
||||
"github.com/rancher/cli/rancher_prompt"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
@@ -25,7 +25,7 @@ Options:
|
||||
{{range .Flags}}{{if .Hidden}}{{else}}{{.}}
|
||||
{{end}}{{end}}{{end}}
|
||||
Commands:
|
||||
{{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
||||
{{range .Commands}}{{.Name}}{{with .Aliases}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
||||
{{end}}
|
||||
Run '{{.Name}} COMMAND --help' for more information on a command.
|
||||
`
|
||||
@@ -65,99 +65,13 @@ func mainErr() error {
|
||||
Name: "debug",
|
||||
Usage: "Debug logging",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "config,c",
|
||||
Usage: "Client configuration file (default ${HOME}/.rancher/cli.json)",
|
||||
EnvVar: "RANCHER_CLIENT_CONFIG",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cluster",
|
||||
Usage: "Specify cluster used for kubectl",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "environment,env",
|
||||
Usage: "Environment name or ID",
|
||||
EnvVar: "RANCHER_ENVIRONMENT",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "url",
|
||||
Usage: "Specify the Rancher API endpoint URL",
|
||||
EnvVar: "RANCHER_URL",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "access-key",
|
||||
Usage: "Specify Rancher API access key",
|
||||
EnvVar: "RANCHER_ACCESS_KEY",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "secret-key",
|
||||
Usage: "Specify Rancher API secret key",
|
||||
EnvVar: "RANCHER_SECRET_KEY",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "host",
|
||||
Usage: "Host used for docker command",
|
||||
EnvVar: "RANCHER_DOCKER_HOST",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "wait,w",
|
||||
Usage: "Wait for resource to reach resting state",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "wait-timeout",
|
||||
Usage: "Timeout in seconds to wait",
|
||||
Value: 600,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "wait-state",
|
||||
Usage: "State to wait for (active, healthy, etc)",
|
||||
},
|
||||
// Below four flags are for rancher-compose code capability. The users doesn't use them directly
|
||||
cli.StringFlag{
|
||||
Name: "rancher-file",
|
||||
Hidden: true,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "env-file",
|
||||
Hidden: true,
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "file,f",
|
||||
Hidden: true,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "project-name",
|
||||
Hidden: true,
|
||||
},
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
cmd.CatalogCommand(),
|
||||
cmd.ClusterCommand(),
|
||||
cmd.ConfigCommand(),
|
||||
cmd.DockerCommand(),
|
||||
cmd.EnvCommand(),
|
||||
cmd.EventsCommand(),
|
||||
cmd.ExecCommand(),
|
||||
cmd.ExportCommand(),
|
||||
cmd.HostCommand(),
|
||||
cmd.KubectlCommand(),
|
||||
cmd.LogsCommand(),
|
||||
cmd.LoginCommand(),
|
||||
cmd.ProjectCommand(),
|
||||
cmd.PsCommand(),
|
||||
cmd.PullCommand(),
|
||||
cmd.PromptCommand(),
|
||||
cmd.RestartCommand(),
|
||||
cmd.RmCommand(),
|
||||
cmd.RunCommand(),
|
||||
cmd.ScaleCommand(),
|
||||
cmd.SecretCommand(),
|
||||
cmd.SSHCommand(),
|
||||
cmd.StackCommand(),
|
||||
cmd.StartCommand(),
|
||||
cmd.StopCommand(),
|
||||
cmd.UpCommand(),
|
||||
cmd.VolumeCommand(),
|
||||
cmd.InspectCommand(),
|
||||
cmd.WaitCommand(),
|
||||
}
|
||||
for _, com := range app.Commands {
|
||||
rancherPrompt.Commands[com.Name] = com
|
||||
|
||||
@@ -3,14 +3,13 @@ package monitor
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/rancher/cli/cliclient"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type Event struct {
|
||||
@@ -22,7 +21,7 @@ type Event struct {
|
||||
|
||||
type Monitor struct {
|
||||
sync.Mutex
|
||||
c *client.RancherClient
|
||||
c *cliclient.MasterClient
|
||||
cache *cache.Cache
|
||||
subCounter int
|
||||
subscriptions map[int]*Subscription
|
||||
@@ -55,7 +54,7 @@ type Subscription struct {
|
||||
C chan *Event
|
||||
}
|
||||
|
||||
func New(c *client.RancherClient) *Monitor {
|
||||
func New(c *cliclient.MasterClient) *Monitor {
|
||||
return &Monitor{
|
||||
c: c,
|
||||
cache: cache.New(5*time.Minute, 30*time.Second),
|
||||
@@ -63,44 +62,44 @@ func New(c *client.RancherClient) *Monitor {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Monitor) Start() error {
|
||||
schema, ok := m.c.GetSchemas().CheckSchema("subscribe")
|
||||
if !ok {
|
||||
return fmt.Errorf("Not authorized to subscribe")
|
||||
}
|
||||
|
||||
urlString := schema.Links["collection"]
|
||||
u, err := url.Parse(urlString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "http":
|
||||
u.Scheme = "ws"
|
||||
case "https":
|
||||
u.Scheme = "wss"
|
||||
}
|
||||
|
||||
q := u.Query()
|
||||
q.Add("eventNames", "resource.change")
|
||||
q.Add("eventNames", "service.kubernetes.change")
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
conn, resp, err := m.c.Websocket(u.String(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 101 {
|
||||
return fmt.Errorf("Bad status code: %d %s", resp.StatusCode, resp.Status)
|
||||
}
|
||||
|
||||
logrus.Debugf("Connected to: %s", u.String())
|
||||
|
||||
return m.watch(conn)
|
||||
}
|
||||
//func (m *Monitor) Start() error {
|
||||
// schema, err := m.c.ManagementClient.DynamicSchema.ByID("subscribe")
|
||||
// if nil != err {
|
||||
// return fmt.Errorf("not authorized to subscribe")
|
||||
// }
|
||||
//
|
||||
// urlString := schema.Links["collection"]
|
||||
// u, err := url.Parse(urlString)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// switch u.Scheme {
|
||||
// case "http":
|
||||
// u.Scheme = "ws"
|
||||
// case "https":
|
||||
// u.Scheme = "wss"
|
||||
// }
|
||||
//
|
||||
// q := u.Query()
|
||||
// q.Add("eventNames", "resource.change")
|
||||
// q.Add("eventNames", "service.kubernetes.change")
|
||||
//
|
||||
// u.RawQuery = q.Encode()
|
||||
//
|
||||
// conn, resp, err := m.c.Websocket(u.String(), nil)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// if resp.StatusCode != 101 {
|
||||
// return fmt.Errorf("Bad status code: %d %s", resp.StatusCode, resp.Status)
|
||||
// }
|
||||
//
|
||||
// logrus.Debugf("Connected to: %s", u.String())
|
||||
//
|
||||
// return m.watch(conn)
|
||||
//}
|
||||
|
||||
func (m *Monitor) Get(resourceType, resourceID string, obj interface{}) (bool, error) {
|
||||
val, ok := m.cache.Get(key(resourceType, resourceID))
|
||||
|
||||
@@ -6,10 +6,10 @@ import (
|
||||
"net/url"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rancher/go-rancher/v3"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type UpWatcher struct {
|
||||
|
||||
@@ -5,16 +5,15 @@ cd $(dirname $0)/..
|
||||
|
||||
echo Running validation
|
||||
|
||||
PACKAGES=". $(find -name '*.go' | xargs -I{} dirname {} | cut -f2 -d/ | sort -u | grep -Ev '(^\.$|.git|.trash-cache|vendor|bin)' | sed -e 's!^!./!' -e 's!$!/...!')"
|
||||
|
||||
echo Running: go vet
|
||||
go vet ${PACKAGES}
|
||||
go vet ./...
|
||||
|
||||
echo Running: golint
|
||||
for i in ${PACKAGES}; do
|
||||
if [ -n "$(golint $i | grep -v 'should have comment.*or be unexported' | tee /dev/stderr)" ]; then
|
||||
failed=true
|
||||
fi
|
||||
done
|
||||
# use go list until golint acts the same as go-1.9 and ignores vendor with ./...
|
||||
if [ -n "$(golint $(go list ./...) | grep -v 'should have comment.*or be unexported' | tee /dev/stderr)" ]; then
|
||||
failed=true
|
||||
fi
|
||||
test -z "$failed"
|
||||
|
||||
echo Running: go fmt
|
||||
test -z "$(go fmt ${PACKAGES} | tee /dev/stderr)"
|
||||
test -z "$(go fmt ./... | tee /dev/stderr)"
|
||||
|
||||
Reference in New Issue
Block a user