
tests/smoke/vendor: switch from glide to dep

$ dep init
// edit Gopkg.toml
```toml
[prune]
  non-go = true
  go-tests = true
  unused-packages = true
```
$ dep ensure
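The `[prune]` block above is what `dep ensure` then encodes per dependency in the generated Gopkg.lock as the `pruneopts` string; as a rough decoding (the letter mapping is an annotation here, not something the commit itself states), each enabled option contributes one letter:
```toml
# Gopkg.toml, edited by hand before running `dep ensure`
[prune]
  non-go = true           # "N"
  go-tests = true         # "T"
  unused-packages = true  # "U"

# Gopkg.lock, written by `dep ensure` for every dependency (values from the lock below)
[[projects]]
  name = "cloud.google.com/go"
  pruneopts = "NUT"  # non-go, unused-packages, go-tests
  revision = "3b1ae45394a234c385be014e9a488f2bb6eef821"
```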
Author: Abhinav Dahiya
Date: 2018-09-28 23:54:44 -07:00
Commit: 70ea0e816b (parent 1f45543179)
207 changed files with 100849 additions and 18331 deletions

tests/smoke/Gopkg.lock (generated, new file, 830 lines)

@@ -0,0 +1,830 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:aa65b4877ac225076b4362885e9122fdf6a8728f735749c24f1aeabcad9bdaba"
name = "cloud.google.com/go"
packages = [
"compute/metadata",
"internal",
]
pruneopts = "NUT"
revision = "3b1ae45394a234c385be014e9a488f2bb6eef821"
[[projects]]
digest = "1:0f04209a212e67d641f36a9a9ef99b1a8e3371cd839f15bccf9a177ef93c3b03"
name = "github.com/Azure/go-autorest"
packages = [
"autorest",
"autorest/azure",
"autorest/date",
]
pruneopts = "NUT"
revision = "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
version = "v7.2.3"
[[projects]]
digest = "1:975b8fd9f8bf97cfdbc6a6af823172e4315b8e0b1d99f7651472f4fe9ff61cbe"
name = "github.com/PuerkitoBio/purell"
packages = ["."]
pruneopts = "NUT"
revision = "8a290539e2e8629dbc4e6bad948158f790ec31f4"
version = "v1.0.0"
[[projects]]
digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727"
name = "github.com/PuerkitoBio/urlesc"
packages = ["."]
pruneopts = "NUT"
revision = "5bd2802263f21d8788851d5305584c82a5c75d7e"
[[projects]]
digest = "1:75d40fa0c338f4c56056a3985e91fa371e8fcd0293e45b80afa7debecaf56012"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "NUT"
revision = "3ac7bf7a47d159a033b107610db8a1b6575507a4"
[[projects]]
digest = "1:620bade21ddf8256869717861431d52650c7e40bc56bdcd3d5ec3da63e7573b0"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "NUT"
revision = "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
[[projects]]
digest = "1:fff28a0718d58de6fcf391a8f7122e1c7b28b253290c8d85bb04b11a0e66162e"
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
pruneopts = "NUT"
revision = "01aeca54ebda6e0fbfafd0a524d234159c05ec20"
[[projects]]
digest = "1:b2499f1a17c35f4dcc051ebc0cbd350a2a3ded5e4656e6111dcfb823b0957a84"
name = "github.com/docker/distribution"
packages = [
"digest",
"reference",
]
pruneopts = "NUT"
revision = "cd27f179f2c10c5d300e6d09025b538c475b0d51"
[[projects]]
digest = "1:f02c3d7fb5b3b87fc22bb822ba81aa55207dfed951f2d48fd47c961a5db9cb59"
name = "github.com/docker/engine-api"
packages = [
"types",
"types/blkiodev",
"types/container",
"types/filters",
"types/network",
"types/registry",
"types/strslice",
"types/versions",
]
pruneopts = "NUT"
revision = "dea108d3aa0c67d7162a3fd8aa65f38a430019fd"
[[projects]]
digest = "1:557bc1ea003afa9a1d4036c01c027cc09eb95d1c3e9ba2d621308c1b899fb068"
name = "github.com/docker/go-connections"
packages = ["nat"]
pruneopts = "NUT"
revision = "f549a9393d05688dff0992ef3efd8bbe6c628aeb"
[[projects]]
digest = "1:5b49e5b8f96c4bb3d7f85e2410890c008a7a633192b64924ff0603e6246293e9"
name = "github.com/docker/go-units"
packages = ["."]
pruneopts = "NUT"
revision = "e30f1e79f3cd72542f2026ceec18d3bd67ab859c"
[[projects]]
digest = "1:6ea3fb0af5a25598466db91ce2f66dbd479dbc667ca3b828f19bc2a6fdffbc71"
name = "github.com/emicklei/go-restful"
packages = [
".",
"log",
]
pruneopts = "NUT"
revision = "ff4f55a206334ef123e4f79bbf348980da81ca46"
[[projects]]
digest = "1:27e00683ae7700cf7b286011dcd6ace672d542d236590dcc51bd10669cfb3366"
name = "github.com/emicklei/go-restful-swagger12"
packages = ["."]
pruneopts = "NUT"
revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
version = "1.0.1"
[[projects]]
digest = "1:f0f9924706ad645471cdb966245cceab9fdc355d940a397c5aa0f4fb5e9de307"
name = "github.com/evanphx/json-patch"
packages = ["."]
pruneopts = "NUT"
revision = "ba18e35c5c1b36ef6334cad706eb681153d2d379"
[[projects]]
branch = "master"
digest = "1:857c01c483190325e4e4c967db37ace964b32cb3024f1d2d3890f2e56b5d7ce7"
name = "github.com/exponent-io/jsonpath"
packages = ["."]
pruneopts = "NUT"
revision = "d6023ce2651d8eafb5c75bb0c7167536102ec9f5"
[[projects]]
digest = "1:fcd865894ce87bcf6499e5d2db3526679e3ab1ae454f48070ebaf983efd20665"
name = "github.com/fatih/camelcase"
packages = ["."]
pruneopts = "NUT"
revision = "f6a740d52f961c60348ebb109adde9f4635d7540"
[[projects]]
digest = "1:abfe129dc92b16fbf0cc9d6336096a2823151756f62072a700eb10754141b38e"
name = "github.com/ghodss/yaml"
packages = ["."]
pruneopts = "NUT"
revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
[[projects]]
digest = "1:77f03b803ab40eaf17c8f418280e09838825106179563856ef235e4e93fc5745"
name = "github.com/go-openapi/analysis"
packages = ["."]
pruneopts = "NUT"
revision = "b44dc874b601d9e4e2f6e19140e794ba24bead3b"
[[projects]]
digest = "1:e90c60f901104f83cdcad5961d752541a0cd6a0bd996d7fd920bfe8f8c39f241"
name = "github.com/go-openapi/jsonpointer"
packages = ["."]
pruneopts = "NUT"
revision = "46af16f9f7b149af66e5d1bd010e3574dc06de98"
[[projects]]
digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546"
name = "github.com/go-openapi/jsonreference"
packages = ["."]
pruneopts = "NUT"
revision = "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272"
[[projects]]
digest = "1:b3af6eb86673f9f8238f4e49be062788d32eb5b0b34d8e4cf432d131b3ece74f"
name = "github.com/go-openapi/loads"
packages = ["."]
pruneopts = "NUT"
revision = "18441dfa706d924a39a030ee2c3b1d8d81917b38"
[[projects]]
digest = "1:9a247af322a3c4780ad9ba3517f517d93925dee18a7e420209f786f5110b1a28"
name = "github.com/go-openapi/spec"
packages = ["."]
pruneopts = "NUT"
revision = "6aced65f8501fe1217321abf0749d354824ba2ff"
[[projects]]
digest = "1:4f5899c71c18d685e67cb365438b433f9d3e0c39f4c865f3d4fe80459b12ffd7"
name = "github.com/go-openapi/swag"
packages = ["."]
pruneopts = "NUT"
revision = "1d0bd113de87027671077d3c71eb3ac5d7dbba72"
[[projects]]
digest = "1:a6afc27b2a73a5506832f3c5a1c19a30772cb69e7bd1ced4639eb36a55db224f"
name = "github.com/gogo/protobuf"
packages = [
"proto",
"sortkeys",
]
pruneopts = "NUT"
revision = "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
[[projects]]
digest = "1:78b8040ece2ff622580def2708b9eb0b2857711b6744c475439bf337e9c677ea"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = "NUT"
revision = "44145f04b68cf362d9c4df2182967c2275eaefed"
[[projects]]
digest = "1:7672c206322f45b33fac1ae2cb899263533ce0adcc6481d207725560208ec84e"
name = "github.com/golang/groupcache"
packages = ["lru"]
pruneopts = "NUT"
revision = "02826c3e79038b59d737d3b1c0a1d937f71a4433"
[[projects]]
digest = "1:4db1c5a2f866e5af769da91c5ff668cb0a3e76568ea855ff6c8ce7b4bb83efe4"
name = "github.com/golang/protobuf"
packages = ["proto"]
pruneopts = "NUT"
revision = "4bd1920723d7b7c925de087aa32e2187708897f7"
[[projects]]
digest = "1:f9425215dccf1c63f659ec781ca46bc81804341821d0cd8d2459c5b58f8bd067"
name = "github.com/google/gofuzz"
packages = ["."]
pruneopts = "NUT"
revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c"
[[projects]]
digest = "1:475b179287e8afdcd352014b2c2500e67decdf63e66125e2129286873453e1cd"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru",
]
pruneopts = "NUT"
revision = "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
[[projects]]
branch = "master"
digest = "1:b7f860847a1d71f925ba9385ed95f1ebc0abfeb418a78e219ab61f48fdfeffad"
name = "github.com/howeyc/gopass"
packages = ["."]
pruneopts = "NUT"
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
[[projects]]
digest = "1:8f5fa95e43ab4a43056b7b65ec1614b4440f33bbfef2fa0a4d4707aedcb1a023"
name = "github.com/imdario/mergo"
packages = ["."]
pruneopts = "NUT"
revision = "6633656539c1639d9d78127b7d47c622b5d7b6dc"
[[projects]]
digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
pruneopts = "NUT"
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
digest = "1:d06f0cc2de030b66daff28951df4c31baedae772e5d4269297cfc74bf7a92c85"
name = "github.com/juju/ratelimit"
packages = ["."]
pruneopts = "NUT"
revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342"
version = "1.0"
[[projects]]
digest = "1:f9d15bd15966e80ec044ed83df29c267b87fbf646765939ee37e48287061e71b"
name = "github.com/mailru/easyjson"
packages = [
"buffer",
"jlexer",
"jwriter",
]
pruneopts = "NUT"
revision = "d5b7844b561a7bc640052f1b935f7b800330d7e0"
[[projects]]
digest = "1:f1bb94f5fab2a670687ec7a30a9160b0193d147ae82d5650231c01b2b3a8d0db"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "NUT"
revision = "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
[[projects]]
digest = "1:9072181164e616e422cbfbe48ca9ac249a4d76301ca0876c9f56b937cf214a2f"
name = "github.com/pborman/uuid"
packages = ["."]
pruneopts = "NUT"
revision = "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
[[projects]]
digest = "1:8226c26c81c73c8f56c675e10f11254dd9afe9470c186fb5a116965e09607e33"
name = "github.com/prometheus/client_golang"
packages = ["prometheus"]
pruneopts = "NUT"
revision = "e7e903064f5e9eb5da98208bae10b475d4db0f8c"
[[projects]]
digest = "1:9fe8945a11a9f588a9d306b4741cad634da9015a704271b9506810e2cc77fa17"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "NUT"
revision = "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
[[projects]]
digest = "1:65f12bb82877d6e049a41b5feec5f79f11e3e0ea5748f677d68f206ac408c403"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
]
pruneopts = "NUT"
revision = "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207"
[[projects]]
digest = "1:6289bdad6cb0bc988631332a560688ac29a47a3f901b54a976d804a4dcbbab72"
name = "github.com/prometheus/procfs"
packages = [
".",
"xfs",
]
pruneopts = "NUT"
revision = "65c1f6f8f0fc1e2185eb9863a3bc751496404259"
[[projects]]
digest = "1:cc7c51e1d12905a4e05981ee6222570cf5050ae5d7043876d0374d5eebdd897d"
name = "github.com/spf13/cobra"
packages = ["."]
pruneopts = "NUT"
revision = "f62e98d28ab7ad31d707ba837a966378465c7b57"
[[projects]]
digest = "1:ed545897e7363038a3e467e8b24c12e7f540f6a114c08a7dca7746a3fe291358"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
revision = "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
[[projects]]
digest = "1:6f841ce38da42c2cb2fa1c16d801942b354800eacb2a7bdf02b4fe24a151f7b8"
name = "github.com/ugorji/go"
packages = ["codec"]
pruneopts = "NUT"
revision = "ded73eae5db7e7a0ef6f55aace87a2873c5d2b74"
[[projects]]
digest = "1:9606ce8905fdfdd29f63fa7301b439d3fbaebccd19ee1c175ac41a1dc55008ad"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
revision = "0fe963104e9d1877082f8fb38f816fcd97eb1d10"
[[projects]]
digest = "1:aaf3c234ddf45f30e37dc726583793250d3c8d08793ef848322f2622d26c93ac"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http2",
"http2/hpack",
"idna",
"lex/httplex",
]
pruneopts = "NUT"
revision = "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
[[projects]]
digest = "1:da311e132160fec8dfc9e659915b8f942e5563c27bbf3c45d2c9e67a1434ef65"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt",
]
pruneopts = "NUT"
revision = "a6bd8cefa1811bd24b86f8902872e4e8225f74c4"
[[projects]]
digest = "1:20e0a8d05d6f97fadd3cbd16e55ce66dc8191800f58fe6bda35cdb671384eb98"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = "NUT"
revision = "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
[[projects]]
digest = "1:3e576cf9d908efd98f37ffa685e4938e87cbec623b4944aa0c56bbbbe0ca8f2b"
name = "golang.org/x/text"
packages = [
"cases",
"encoding",
"encoding/internal",
"encoding/internal/identifier",
"encoding/unicode",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"internal/utf8internal",
"language",
"runes",
"secure/bidirule",
"secure/precis",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
"width",
]
pruneopts = "NUT"
revision = "2910a502d2bf9e43193af9d68ca516529614eed3"
[[projects]]
digest = "1:1b7dbca08b033384169fcfbc0835f5f0a36b790e9d1e8e076669959901dedf10"
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"urlfetch",
]
pruneopts = "NUT"
revision = "12d5545dc1cfa6047a286d5e853841b6471f4c19"
[[projects]]
digest = "1:ef72505cf098abdd34efeea032103377bec06abb61d8a06f002d5d296a4b1185"
name = "gopkg.in/inf.v0"
packages = ["."]
pruneopts = "NUT"
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
version = "v0.9.0"
[[projects]]
digest = "1:302ad18387350c3d9792da66de666f76d2ca8c62c47dd6b9434269c7cfa18971"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "NUT"
revision = "53feefa2559fb8dfa8d81baad31be332c97d6c77"
[[projects]]
digest = "1:10a78d0be666fa3ec3aa2e63c0c2dc4fa83edfbdde0a35d290c482db3874b40e"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/equality",
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/api/validation",
"pkg/apimachinery",
"pkg/apimachinery/announced",
"pkg/apimachinery/registered",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1/validation",
"pkg/apis/meta/v1alpha1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/conversion/unstructured",
"pkg/fields",
"pkg/labels",
"pkg/openapi",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/net",
"pkg/util/rand",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/strategicpatch",
"pkg/util/uuid",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/reflect",
]
pruneopts = "NUT"
revision = "abe34e4f5b4413c282a83011892cbeea5b32223b"
[[projects]]
branch = "release-1.7"
digest = "1:debba4fc1eef2f9c6b43b0c6cce8478471fd2339bb8cf87bcb0badf6928d9f2d"
name = "k8s.io/apiserver"
packages = [
"pkg/apis/audit",
"pkg/authentication/authenticator",
"pkg/authentication/serviceaccount",
"pkg/authentication/user",
"pkg/endpoints/request",
"pkg/features",
"pkg/util/feature",
"pkg/util/flag",
]
pruneopts = "NUT"
revision = "16d918e3cba143f774f09a6bd7bd1f5e314335e3"
[[projects]]
digest = "1:cad77f31c88f0f95f790a4d5783b0f7a15154a80329becdef3b7f1205c01b9f2"
name = "k8s.io/client-go"
packages = [
"discovery",
"dynamic",
"kubernetes",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v2alpha1",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/core/v1",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/networking/v1",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1beta1",
"pkg/api",
"pkg/api/v1",
"pkg/api/v1/ref",
"pkg/apis/admissionregistration",
"pkg/apis/admissionregistration/v1alpha1",
"pkg/apis/apps",
"pkg/apis/apps/v1beta1",
"pkg/apis/authentication",
"pkg/apis/authentication/v1",
"pkg/apis/authentication/v1beta1",
"pkg/apis/authorization",
"pkg/apis/authorization/v1",
"pkg/apis/authorization/v1beta1",
"pkg/apis/autoscaling",
"pkg/apis/autoscaling/v1",
"pkg/apis/autoscaling/v2alpha1",
"pkg/apis/batch",
"pkg/apis/batch/v1",
"pkg/apis/batch/v2alpha1",
"pkg/apis/certificates",
"pkg/apis/certificates/v1beta1",
"pkg/apis/extensions",
"pkg/apis/extensions/v1beta1",
"pkg/apis/networking",
"pkg/apis/networking/v1",
"pkg/apis/policy",
"pkg/apis/policy/v1beta1",
"pkg/apis/rbac",
"pkg/apis/rbac/v1alpha1",
"pkg/apis/rbac/v1beta1",
"pkg/apis/settings",
"pkg/apis/settings/v1alpha1",
"pkg/apis/storage",
"pkg/apis/storage/v1",
"pkg/apis/storage/v1beta1",
"pkg/util",
"pkg/util/parsers",
"pkg/version",
"plugin/pkg/client/auth",
"plugin/pkg/client/auth/azure",
"plugin/pkg/client/auth/gcp",
"plugin/pkg/client/auth/oidc",
"rest",
"rest/watch",
"third_party/forked/golang/template",
"tools/auth",
"tools/cache",
"tools/clientcmd",
"tools/clientcmd/api",
"tools/clientcmd/api/latest",
"tools/clientcmd/api/v1",
"tools/metrics",
"tools/record",
"transport",
"util/cert",
"util/flowcontrol",
"util/homedir",
"util/integer",
"util/jsonpath",
"util/workqueue",
]
pruneopts = "NUT"
revision = "df46f7f13b3da19b90b8b4f0d18b8adc6fbf28dc"
version = "v4.0.0-beta.0"
[[projects]]
digest = "1:6b2e4d40f4e78acbbdfe656b846a9f68e4c4db2239d219c3ad97535172ed8fb4"
name = "k8s.io/kubernetes"
packages = [
"cmd/kubeadm/app/apis/kubeadm",
"federation/apis/federation",
"federation/apis/federation/install",
"federation/apis/federation/v1beta1",
"federation/client/clientset_generated/federation_internalclientset",
"federation/client/clientset_generated/federation_internalclientset/scheme",
"federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion",
"federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion",
"federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion",
"federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion",
"federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion",
"pkg/api",
"pkg/api/events",
"pkg/api/helper",
"pkg/api/helper/qos",
"pkg/api/install",
"pkg/api/pod",
"pkg/api/ref",
"pkg/api/resource",
"pkg/api/service",
"pkg/api/util",
"pkg/api/v1",
"pkg/api/v1/helper",
"pkg/api/v1/helper/qos",
"pkg/api/v1/pod",
"pkg/api/v1/ref",
"pkg/api/validation",
"pkg/apis/admissionregistration",
"pkg/apis/admissionregistration/install",
"pkg/apis/admissionregistration/v1alpha1",
"pkg/apis/apps",
"pkg/apis/apps/install",
"pkg/apis/apps/v1beta1",
"pkg/apis/authentication",
"pkg/apis/authentication/install",
"pkg/apis/authentication/v1",
"pkg/apis/authentication/v1beta1",
"pkg/apis/authorization",
"pkg/apis/authorization/install",
"pkg/apis/authorization/v1",
"pkg/apis/authorization/v1beta1",
"pkg/apis/autoscaling",
"pkg/apis/autoscaling/install",
"pkg/apis/autoscaling/v1",
"pkg/apis/autoscaling/v2alpha1",
"pkg/apis/batch",
"pkg/apis/batch/install",
"pkg/apis/batch/v1",
"pkg/apis/batch/v2alpha1",
"pkg/apis/certificates",
"pkg/apis/certificates/install",
"pkg/apis/certificates/v1beta1",
"pkg/apis/componentconfig",
"pkg/apis/componentconfig/install",
"pkg/apis/componentconfig/v1alpha1",
"pkg/apis/extensions",
"pkg/apis/extensions/install",
"pkg/apis/extensions/v1beta1",
"pkg/apis/networking",
"pkg/apis/networking/install",
"pkg/apis/networking/v1",
"pkg/apis/policy",
"pkg/apis/policy/install",
"pkg/apis/policy/v1beta1",
"pkg/apis/rbac",
"pkg/apis/rbac/install",
"pkg/apis/rbac/v1alpha1",
"pkg/apis/rbac/v1beta1",
"pkg/apis/settings",
"pkg/apis/settings/install",
"pkg/apis/settings/v1alpha1",
"pkg/apis/storage",
"pkg/apis/storage/install",
"pkg/apis/storage/util",
"pkg/apis/storage/v1",
"pkg/apis/storage/v1beta1",
"pkg/capabilities",
"pkg/client/clientset_generated/clientset",
"pkg/client/clientset_generated/clientset/scheme",
"pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1",
"pkg/client/clientset_generated/clientset/typed/apps/v1beta1",
"pkg/client/clientset_generated/clientset/typed/authentication/v1",
"pkg/client/clientset_generated/clientset/typed/authentication/v1beta1",
"pkg/client/clientset_generated/clientset/typed/authorization/v1",
"pkg/client/clientset_generated/clientset/typed/authorization/v1beta1",
"pkg/client/clientset_generated/clientset/typed/autoscaling/v1",
"pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1",
"pkg/client/clientset_generated/clientset/typed/batch/v1",
"pkg/client/clientset_generated/clientset/typed/batch/v2alpha1",
"pkg/client/clientset_generated/clientset/typed/certificates/v1beta1",
"pkg/client/clientset_generated/clientset/typed/core/v1",
"pkg/client/clientset_generated/clientset/typed/extensions/v1beta1",
"pkg/client/clientset_generated/clientset/typed/networking/v1",
"pkg/client/clientset_generated/clientset/typed/policy/v1beta1",
"pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1",
"pkg/client/clientset_generated/clientset/typed/rbac/v1beta1",
"pkg/client/clientset_generated/clientset/typed/settings/v1alpha1",
"pkg/client/clientset_generated/clientset/typed/storage/v1",
"pkg/client/clientset_generated/clientset/typed/storage/v1beta1",
"pkg/client/clientset_generated/internalclientset",
"pkg/client/clientset_generated/internalclientset/scheme",
"pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/apps/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/batch/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/core/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/networking/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/policy/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/settings/internalversion",
"pkg/client/clientset_generated/internalclientset/typed/storage/internalversion",
"pkg/client/informers/informers_generated/externalversions/apps/v1beta1",
"pkg/client/informers/informers_generated/externalversions/core/v1",
"pkg/client/informers/informers_generated/externalversions/extensions/v1beta1",
"pkg/client/informers/informers_generated/externalversions/internalinterfaces",
"pkg/client/leaderelection/resourcelock",
"pkg/client/listers/apps/v1beta1",
"pkg/client/listers/core/v1",
"pkg/client/listers/extensions/v1beta1",
"pkg/client/retry",
"pkg/client/unversioned",
"pkg/controller",
"pkg/controller/daemon",
"pkg/controller/daemon/util",
"pkg/controller/deployment/util",
"pkg/credentialprovider",
"pkg/features",
"pkg/fieldpath",
"pkg/kubectl",
"pkg/kubectl/cmd/util",
"pkg/kubectl/cmd/util/openapi",
"pkg/kubectl/plugins",
"pkg/kubectl/resource",
"pkg/kubectl/util",
"pkg/kubelet/apis",
"pkg/kubelet/qos",
"pkg/kubelet/types",
"pkg/master/ports",
"pkg/printers",
"pkg/printers/internalversion",
"pkg/registry/rbac/validation",
"pkg/security/apparmor",
"pkg/serviceaccount",
"pkg/util",
"pkg/util/exec",
"pkg/util/hash",
"pkg/util/labels",
"pkg/util/metrics",
"pkg/util/mount",
"pkg/util/net/sets",
"pkg/util/node",
"pkg/util/parsers",
"pkg/util/slice",
"pkg/version",
"pkg/volume/util",
"plugin/pkg/scheduler/algorithm",
"plugin/pkg/scheduler/algorithm/predicates",
"plugin/pkg/scheduler/algorithm/priorities/util",
"plugin/pkg/scheduler/api",
"plugin/pkg/scheduler/schedulercache",
"plugin/pkg/scheduler/util",
]
pruneopts = "NUT"
revision = "d3ada0119e776222f11ec7945e6d860061339aad"
version = "v1.7.0"
[[projects]]
digest = "1:ad9115537c88429538df5839659c53a7eb8410e799b65bc64f0a37fc1542dd49"
name = "k8s.io/metrics"
packages = [
"pkg/apis/metrics",
"pkg/apis/metrics/v1alpha1",
"pkg/client/clientset_generated/clientset",
"pkg/client/clientset_generated/clientset/scheme",
"pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1",
]
pruneopts = "NUT"
revision = "02cccda7258b5d9709c19dd7413ccb15e15d03ce"
version = "kubernetes-1.7.10"
[[projects]]
digest = "1:2385d1bef8989a5f919a71efee3f337df3f0ad7b29ee23204b5614aad3e3f577"
name = "vbom.ml/util"
packages = ["sortorder"]
pruneopts = "NUT"
revision = "db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
"k8s.io/apimachinery/pkg/util/errors",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/pkg/api/v1",
"k8s.io/client-go/tools/clientcmd",
"k8s.io/kubernetes/pkg/kubectl/cmd/util",
"k8s.io/kubernetes/pkg/kubectl/resource",
]
solver-name = "gps-cdcl"
solver-version = 1

tests/smoke/Gopkg.toml (new file, 12 lines)

@@ -0,0 +1,12 @@
[[constraint]]
name = "k8s.io/client-go"
version = "4.0.0-beta.0"
[[constraint]]
name = "k8s.io/kubernetes"
version = "1.7.0"
[prune]
non-go = true
go-tests = true
unused-packages = true
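Each `[[constraint]]` here resolves to a pinned `[[projects]]` entry in the Gopkg.lock above; for example, the client-go constraint ends up locked as follows (values copied from the lock, shown side by side only for illustration):
```toml
# tests/smoke/Gopkg.toml
[[constraint]]
  name = "k8s.io/client-go"
  version = "4.0.0-beta.0"

# tests/smoke/Gopkg.lock
[[projects]]
  name = "k8s.io/client-go"
  version = "v4.0.0-beta.0"
  revision = "df46f7f13b3da19b90b8b4f0d18b8adc6fbf28dc"
  pruneopts = "NUT"
```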

tests/smoke/glide.lock (generated, deleted, 549 lines)

@@ -1,549 +0,0 @@
hash: d8474c63dff64aae6e66874506650f49ed247a89342c5066874a2c7d28e5a3ec
updated: 2018-02-01T15:35:54.10429791+01:00
imports:
- name: github.com/coreos/ktestutil
version: 47edf51e9cdb1955e93e56e6c84ed944fdda4789
subpackages:
- testworkload
- name: k8s.io/apimachinery
version: abe34e4f5b4413c282a83011892cbeea5b32223b
subpackages:
- pkg/api/equality
- pkg/api/errors
- pkg/api/meta
- pkg/api/resource
- pkg/api/validation
- pkg/apimachinery
- pkg/apimachinery/announced
- pkg/apimachinery/registered
- pkg/apis/meta/v1
- pkg/apis/meta/v1/unstructured
- pkg/apis/meta/v1/validation
- pkg/apis/meta/v1alpha1
- pkg/conversion
- pkg/conversion/queryparams
- pkg/conversion/unstructured
- pkg/fields
- pkg/labels
- pkg/openapi
- pkg/runtime
- pkg/runtime/schema
- pkg/runtime/serializer
- pkg/runtime/serializer/json
- pkg/runtime/serializer/protobuf
- pkg/runtime/serializer/recognizer
- pkg/runtime/serializer/streaming
- pkg/runtime/serializer/versioning
- pkg/selection
- pkg/types
- pkg/util/cache
- pkg/util/clock
- pkg/util/diff
- pkg/util/errors
- pkg/util/framer
- pkg/util/intstr
- pkg/util/json
- pkg/util/mergepatch
- pkg/util/net
- pkg/util/rand
- pkg/util/runtime
- pkg/util/sets
- pkg/util/strategicpatch
- pkg/util/uuid
- pkg/util/validation
- pkg/util/validation/field
- pkg/util/wait
- pkg/util/yaml
- pkg/version
- pkg/watch
- third_party/forked/golang/json
- third_party/forked/golang/reflect
- name: k8s.io/apiserver
version: 16d918e3cba143f774f09a6bd7bd1f5e314335e3
subpackages:
- pkg/apis/audit
- pkg/authentication/authenticator
- pkg/authentication/serviceaccount
- pkg/authentication/user
- pkg/endpoints/request
- pkg/features
- pkg/util/feature
- pkg/util/flag
- name: k8s.io/client-go
version: df46f7f13b3da19b90b8b4f0d18b8adc6fbf28dc
subpackages:
- discovery
- dynamic
- kubernetes
- kubernetes/scheme
- kubernetes/typed/admissionregistration/v1alpha1
- kubernetes/typed/apps/v1beta1
- kubernetes/typed/authentication/v1
- kubernetes/typed/authentication/v1beta1
- kubernetes/typed/authorization/v1
- kubernetes/typed/authorization/v1beta1
- kubernetes/typed/autoscaling/v1
- kubernetes/typed/autoscaling/v2alpha1
- kubernetes/typed/batch/v1
- kubernetes/typed/batch/v2alpha1
- kubernetes/typed/certificates/v1beta1
- kubernetes/typed/core/v1
- kubernetes/typed/extensions/v1beta1
- kubernetes/typed/networking/v1
- kubernetes/typed/policy/v1beta1
- kubernetes/typed/rbac/v1alpha1
- kubernetes/typed/rbac/v1beta1
- kubernetes/typed/settings/v1alpha1
- kubernetes/typed/storage/v1
- kubernetes/typed/storage/v1beta1
- pkg/api
- pkg/api/v1
- pkg/api/v1/ref
- pkg/apis/admissionregistration
- pkg/apis/admissionregistration/v1alpha1
- pkg/apis/apps
- pkg/apis/apps/v1beta1
- pkg/apis/authentication
- pkg/apis/authentication/v1
- pkg/apis/authentication/v1beta1
- pkg/apis/authorization
- pkg/apis/authorization/v1
- pkg/apis/authorization/v1beta1
- pkg/apis/autoscaling
- pkg/apis/autoscaling/v1
- pkg/apis/autoscaling/v2alpha1
- pkg/apis/batch
- pkg/apis/batch/v1
- pkg/apis/batch/v2alpha1
- pkg/apis/certificates
- pkg/apis/certificates/v1beta1
- pkg/apis/extensions
- pkg/apis/extensions/v1beta1
- pkg/apis/networking
- pkg/apis/networking/v1
- pkg/apis/policy
- pkg/apis/policy/v1beta1
- pkg/apis/rbac
- pkg/apis/rbac/v1alpha1
- pkg/apis/rbac/v1beta1
- pkg/apis/settings
- pkg/apis/settings/v1alpha1
- pkg/apis/storage
- pkg/apis/storage/v1
- pkg/apis/storage/v1beta1
- pkg/util
- pkg/util/parsers
- pkg/version
- plugin/pkg/client/auth
- plugin/pkg/client/auth/azure
- plugin/pkg/client/auth/gcp
- plugin/pkg/client/auth/oidc
- rest
- rest/watch
- third_party/forked/golang/template
- tools/auth
- tools/cache
- tools/clientcmd
- tools/clientcmd/api
- tools/clientcmd/api/latest
- tools/clientcmd/api/v1
- tools/metrics
- tools/record
- transport
- util/cert
- util/flowcontrol
- util/homedir
- util/integer
- util/jsonpath
- util/workqueue
- name: k8s.io/kubernetes
version: d3ada0119e776222f11ec7945e6d860061339aad
subpackages:
- cmd/kubeadm/app/apis/kubeadm
- federation/apis/federation
- federation/apis/federation/install
- federation/apis/federation/v1beta1
- federation/client/clientset_generated/federation_internalclientset
- federation/client/clientset_generated/federation_internalclientset/scheme
- federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion
- federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion
- federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion
- federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion
- federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion
- pkg/api
- pkg/api/events
- pkg/api/helper
- pkg/api/helper/qos
- pkg/api/install
- pkg/api/pod
- pkg/api/ref
- pkg/api/resource
- pkg/api/service
- pkg/api/util
- pkg/api/v1
- pkg/api/v1/helper
- pkg/api/v1/helper/qos
- pkg/api/v1/pod
- pkg/api/v1/ref
- pkg/api/validation
- pkg/apis/admissionregistration
- pkg/apis/admissionregistration/install
- pkg/apis/admissionregistration/v1alpha1
- pkg/apis/apps
- pkg/apis/apps/install
- pkg/apis/apps/v1beta1
- pkg/apis/authentication
- pkg/apis/authentication/install
- pkg/apis/authentication/v1
- pkg/apis/authentication/v1beta1
- pkg/apis/authorization
- pkg/apis/authorization/install
- pkg/apis/authorization/v1
- pkg/apis/authorization/v1beta1
- pkg/apis/autoscaling
- pkg/apis/autoscaling/install
- pkg/apis/autoscaling/v1
- pkg/apis/autoscaling/v2alpha1
- pkg/apis/batch
- pkg/apis/batch/install
- pkg/apis/batch/v1
- pkg/apis/batch/v2alpha1
- pkg/apis/certificates
- pkg/apis/certificates/install
- pkg/apis/certificates/v1beta1
- pkg/apis/componentconfig
- pkg/apis/componentconfig/install
- pkg/apis/componentconfig/v1alpha1
- pkg/apis/extensions
- pkg/apis/extensions/install
- pkg/apis/extensions/v1beta1
- pkg/apis/networking
- pkg/apis/networking/install
- pkg/apis/networking/v1
- pkg/apis/policy
- pkg/apis/policy/install
- pkg/apis/policy/v1beta1
- pkg/apis/rbac
- pkg/apis/rbac/install
- pkg/apis/rbac/v1alpha1
- pkg/apis/rbac/v1beta1
- pkg/apis/settings
- pkg/apis/settings/install
- pkg/apis/settings/v1alpha1
- pkg/apis/storage
- pkg/apis/storage/install
- pkg/apis/storage/util
- pkg/apis/storage/v1
- pkg/apis/storage/v1beta1
- pkg/capabilities
- pkg/client/clientset_generated/clientset
- pkg/client/clientset_generated/clientset/scheme
- pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1
- pkg/client/clientset_generated/clientset/typed/apps/v1beta1
- pkg/client/clientset_generated/clientset/typed/authentication/v1
- pkg/client/clientset_generated/clientset/typed/authentication/v1beta1
- pkg/client/clientset_generated/clientset/typed/authorization/v1
- pkg/client/clientset_generated/clientset/typed/authorization/v1beta1
- pkg/client/clientset_generated/clientset/typed/autoscaling/v1
- pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1
- pkg/client/clientset_generated/clientset/typed/batch/v1
- pkg/client/clientset_generated/clientset/typed/batch/v2alpha1
- pkg/client/clientset_generated/clientset/typed/certificates/v1beta1
- pkg/client/clientset_generated/clientset/typed/core/v1
- pkg/client/clientset_generated/clientset/typed/extensions/v1beta1
- pkg/client/clientset_generated/clientset/typed/networking/v1
- pkg/client/clientset_generated/clientset/typed/policy/v1beta1
- pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1
- pkg/client/clientset_generated/clientset/typed/rbac/v1beta1
- pkg/client/clientset_generated/clientset/typed/settings/v1alpha1
- pkg/client/clientset_generated/clientset/typed/storage/v1
- pkg/client/clientset_generated/clientset/typed/storage/v1beta1
- pkg/client/clientset_generated/internalclientset
- pkg/client/clientset_generated/internalclientset/scheme
- pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion
- pkg/client/clientset_generated/internalclientset/typed/apps/internalversion
- pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion
- pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion
- pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion
- pkg/client/clientset_generated/internalclientset/typed/batch/internalversion
- pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion
- pkg/client/clientset_generated/internalclientset/typed/core/internalversion
- pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion
- pkg/client/clientset_generated/internalclientset/typed/networking/internalversion
- pkg/client/clientset_generated/internalclientset/typed/policy/internalversion
- pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion
- pkg/client/clientset_generated/internalclientset/typed/settings/internalversion
- pkg/client/clientset_generated/internalclientset/typed/storage/internalversion
- pkg/client/informers/informers_generated/externalversions/apps/v1beta1
- pkg/client/informers/informers_generated/externalversions/core/v1
- pkg/client/informers/informers_generated/externalversions/extensions/v1beta1
- pkg/client/informers/informers_generated/externalversions/internalinterfaces
- pkg/client/leaderelection/resourcelock
- pkg/client/listers/apps/v1beta1
- pkg/client/listers/core/v1
- pkg/client/listers/extensions/v1beta1
- pkg/client/retry
- pkg/client/unversioned
- pkg/controller
- pkg/controller/daemon
- pkg/controller/daemon/util
- pkg/controller/deployment/util
- pkg/credentialprovider
- pkg/features
- pkg/fieldpath
- pkg/kubectl
- pkg/kubectl/cmd/util
- pkg/kubectl/cmd/util/openapi
- pkg/kubectl/plugins
- pkg/kubectl/resource
- pkg/kubectl/util
- pkg/kubelet/apis
- pkg/kubelet/qos
- pkg/kubelet/types
- pkg/master/ports
- pkg/printers
- pkg/printers/internalversion
- pkg/registry/rbac/validation
- pkg/security/apparmor
- pkg/serviceaccount
- pkg/util
- pkg/util/exec
- pkg/util/hash
- pkg/util/labels
- pkg/util/metrics
- pkg/util/mount
- pkg/util/net/sets
- pkg/util/node
- pkg/util/parsers
- pkg/util/slice
- pkg/version
- pkg/volume/util
- plugin/pkg/scheduler/algorithm
- plugin/pkg/scheduler/algorithm/predicates
- plugin/pkg/scheduler/algorithm/priorities/util
- plugin/pkg/scheduler/api
- plugin/pkg/scheduler/schedulercache
- plugin/pkg/scheduler/util
testImports:
- name: cloud.google.com/go
version: 3b1ae45394a234c385be014e9a488f2bb6eef821
subpackages:
- compute/metadata
- internal
- name: github.com/Azure/go-autorest
version: d7c034a8af24eda120dd6460bfcd6d9ed14e43ca
subpackages:
- autorest
- autorest/azure
- autorest/date
- name: github.com/beorn7/perks
version: 3ac7bf7a47d159a033b107610db8a1b6575507a4
subpackages:
- quantile
- name: github.com/davecgh/go-spew
version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
subpackages:
- spew
- name: github.com/dgrijalva/jwt-go
version: 01aeca54ebda6e0fbfafd0a524d234159c05ec20
- name: github.com/docker/distribution
version: cd27f179f2c10c5d300e6d09025b538c475b0d51
subpackages:
- digest
- reference
- name: github.com/docker/engine-api
version: dea108d3aa0c67d7162a3fd8aa65f38a430019fd
subpackages:
- client
- client/transport
- client/transport/cancellable
- types
- types/blkiodev
- types/container
- types/filters
- types/network
- types/reference
- types/registry
- types/strslice
- types/time
- types/versions
- name: github.com/docker/go-connections
version: f549a9393d05688dff0992ef3efd8bbe6c628aeb
subpackages:
- nat
- sockets
- tlsconfig
- name: github.com/docker/go-units
version: e30f1e79f3cd72542f2026ceec18d3bd67ab859c
- name: github.com/emicklei/go-restful
version: ff4f55a206334ef123e4f79bbf348980da81ca46
subpackages:
- log
- name: github.com/emicklei/go-restful-swagger12
version: dcef7f55730566d41eae5db10e7d6981829720f6
- name: github.com/evanphx/json-patch
version: ba18e35c5c1b36ef6334cad706eb681153d2d379
- name: github.com/exponent-io/jsonpath
version: d6023ce2651d8eafb5c75bb0c7167536102ec9f5
- name: github.com/fatih/camelcase
version: f6a740d52f961c60348ebb109adde9f4635d7540
- name: github.com/ghodss/yaml
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
- name: github.com/go-openapi/analysis
version: b44dc874b601d9e4e2f6e19140e794ba24bead3b
- name: github.com/go-openapi/jsonpointer
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
- name: github.com/go-openapi/jsonreference
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
- name: github.com/go-openapi/loads
version: 18441dfa706d924a39a030ee2c3b1d8d81917b38
- name: github.com/go-openapi/spec
version: 6aced65f8501fe1217321abf0749d354824ba2ff
- name: github.com/go-openapi/swag
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
- name: github.com/gogo/protobuf
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
subpackages:
- proto
- sortkeys
- name: github.com/golang/glog
version: 44145f04b68cf362d9c4df2182967c2275eaefed
- name: github.com/golang/groupcache
version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
subpackages:
- lru
- name: github.com/golang/protobuf
version: 4bd1920723d7b7c925de087aa32e2187708897f7
subpackages:
- proto
- name: github.com/google/gofuzz
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
- name: github.com/hashicorp/golang-lru
version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
subpackages:
- simplelru
- name: github.com/howeyc/gopass
version: bf9dde6d0d2c004a008c27aaee91170c786f6db8
- name: github.com/imdario/mergo
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/juju/ratelimit
version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
- name: github.com/mailru/easyjson
version: d5b7844b561a7bc640052f1b935f7b800330d7e0
subpackages:
- buffer
- jlexer
- jwriter
- name: github.com/matttproud/golang_protobuf_extensions
version: fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
subpackages:
- pbutil
- name: github.com/pborman/uuid
version: ca53cad383cad2479bbba7f7a1a05797ec1386e4
- name: github.com/prometheus/client_golang
version: e7e903064f5e9eb5da98208bae10b475d4db0f8c
subpackages:
- prometheus
- prometheus/promhttp
- name: github.com/prometheus/client_model
version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6
subpackages:
- go
- name: github.com/prometheus/common
version: 13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207
subpackages:
- expfmt
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
version: 65c1f6f8f0fc1e2185eb9863a3bc751496404259
subpackages:
- xfs
- name: github.com/PuerkitoBio/purell
version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
- name: github.com/PuerkitoBio/urlesc
version: 5bd2802263f21d8788851d5305584c82a5c75d7e
- name: github.com/spf13/cobra
version: f62e98d28ab7ad31d707ba837a966378465c7b57
subpackages:
- doc
- name: github.com/spf13/pflag
version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
- name: github.com/ugorji/go
version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
subpackages:
- codec
- name: golang.org/x/crypto
version: 0fe963104e9d1877082f8fb38f816fcd97eb1d10
subpackages:
- ssh
- ssh/agent
- ssh/terminal
- name: golang.org/x/net
version: f2499483f923065a842d38eb4c7f1927e6fc6e6d
subpackages:
- context
- context/ctxhttp
- http2
- http2/hpack
- idna
- lex/httplex
- name: golang.org/x/oauth2
version: a6bd8cefa1811bd24b86f8902872e4e8225f74c4
subpackages:
- google
- internal
- jws
- jwt
- name: golang.org/x/sys
version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
subpackages:
- unix
- name: golang.org/x/text
version: 2910a502d2bf9e43193af9d68ca516529614eed3
subpackages:
- cases
- encoding
- encoding/internal
- encoding/internal/identifier
- encoding/unicode
- internal/tag
- internal/utf8internal
- language
- runes
- secure/bidirule
- secure/precis
- transform
- unicode/bidi
- unicode/norm
- width
- name: google.golang.org/appengine
version: 12d5545dc1cfa6047a286d5e853841b6471f4c19
subpackages:
- internal
- internal/app_identity
- internal/base
- internal/datastore
- internal/log
- internal/modules
- internal/remote_api
- internal/urlfetch
- urlfetch
- name: gopkg.in/inf.v0
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
- name: gopkg.in/yaml.v2
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
- name: k8s.io/metrics
version: 02cccda7258b5d9709c19dd7413ccb15e15d03ce
subpackages:
- pkg/apis/metrics
- pkg/apis/metrics/v1alpha1
- pkg/client/clientset_generated/clientset
- pkg/client/clientset_generated/clientset/scheme
- pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1
- name: vbom.ml/util
version: db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394
subpackages:
- sortorder


@@ -1,13 +0,0 @@
---
package: "github.com/openshift/installer/tests/smoke"
import:
- package: k8s.io/client-go
version: v4.0.0-beta.0
- package: k8s.io/kubernetes
version: v1.7.0
- package: k8s.io/apimachinery
version: abe34e4f5b4413c282a83011892cbeea5b32223b
- package: github.com/coreos/ktestutil
version: 47edf51e9cdb1955e93e56e6c84ed944fdda4789
- package: k8s.io/apiserver
version: release-1.7

tests/smoke/vendor/cloud.google.com/go/AUTHORS (generated, vendored, new file, 15 lines)

@@ -0,0 +1,15 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>

tests/smoke/vendor/cloud.google.com/go/CONTRIBUTORS (generated, vendored, new file, 34 lines)

@@ -0,0 +1,34 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
# Keep the list alphabetically sorted.
Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>


@@ -1,20 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cloud is the root of the packages used to access Google Cloud
// Services. See https://godoc.org/cloud.google.com/go for a full list
// of sub-packages.
//
// This package documents how to authorize and authenticate the sub packages.
package cloud // import "cloud.google.com/go"


@@ -1,323 +0,0 @@
package testworkload
import (
"fmt"
"time"
"github.com/golang/glog"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
batchv1 "k8s.io/client-go/pkg/apis/batch/v1"
extensionsv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1"
)
var (
// PollTimeoutForNginx is the max duration for polling when using Nginx testworkload.
PollTimeoutForNginx = 10 * time.Minute
// PollIntervalForNginx is the interval between each condition check when polling with the Nginx testworkload.
PollIntervalForNginx = 10 * time.Second
)
// Nginx creates a temp nginx deployment/service pair
// that can be used as a test workload
type Nginx struct {
Namespace string
Name string
// List of pods that belong to the deployment
Pods []*v1.Pod
client kubernetes.Interface
podSelector *metav1.LabelSelector
nodeSelector *metav1.LabelSelector
pingPodSelector *metav1.LabelSelector
}
// NginxOpts defines func that applies custom options for Nginx
type NginxOpts func(*Nginx) error
// NewNginx creates the nginx deployment/service pair.
// It waits until all the pods in the deployment are running.
func NewNginx(kc kubernetes.Interface, namespace string, options ...NginxOpts) (*Nginx, error) {
//create random suffix
name := fmt.Sprintf("nginx-%s", utilrand.String(5))
n := &Nginx{
Namespace: namespace,
Name: name,
podSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": name}},
nodeSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{},
},
pingPodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{},
},
client: kc,
}
//apply options
for _, option := range options {
if err := option(n); err != nil {
return nil, fmt.Errorf("error invalid options: %v", err)
}
}
var err error
defer func() {
if err != nil {
n.Delete()
}
}()
if err = n.newNginxDeployment(); err != nil {
return nil, fmt.Errorf("error creating deployment %s: %v", n.Name, err)
}
if err = n.newNginxService(); err != nil {
return nil, fmt.Errorf("error creating service %s: %v", n.Name, err)
}
if err = wait.PollImmediate(PollIntervalForNginx, PollTimeoutForNginx, func() (bool, error) {
d, err := kc.ExtensionsV1beta1().Deployments(n.Namespace).Get(n.Name, metav1.GetOptions{})
if err != nil {
glog.Errorf("Error getting deployment %s: %v", n.Name, err)
return false, nil
}
if d.Status.UpdatedReplicas != d.Status.AvailableReplicas && d.Status.UnavailableReplicas != 0 {
return false, nil
}
//wait for all pods to enter running phase
pl, err := kc.CoreV1().Pods(n.Namespace).List(metav1.ListOptions{
LabelSelector: metav1.FormatLabelSelector(n.podSelector),
})
if err != nil {
glog.Errorf("Error getting pods for deployment %s: %v", n.Name, err)
return false, nil
}
if len(pl.Items) == 0 {
return false, nil
}
var pods []*v1.Pod
for i := range pl.Items {
p := &pl.Items[i]
if p.Status.Phase != v1.PodRunning {
return false, nil
}
pods = append(pods, p)
}
n.Pods = pods
return true, nil
}); err != nil {
return nil, fmt.Errorf("deployment %s is not ready: %v", n.Name, err)
}
return n, nil
}
// WithNginxSelector adds custom labels for Deployment's Selector field.
// Affects only Deployment pods.
func WithNginxSelector(labels map[string]string) NginxOpts {
return func(n *Nginx) error {
for k, v := range labels {
n.podSelector.MatchLabels[k] = v
}
return nil
}
}
// WithNginxNodeSelector adds custom labels for Pod's NodeSelector field
// Affects only Deployment's pods.
func WithNginxNodeSelector(labels map[string]string) NginxOpts {
return func(n *Nginx) error {
for k, v := range labels {
n.nodeSelector.MatchLabels[k] = v
}
return nil
}
}
// WithNginxPingJobLabels adds custom labels for PingJob's pods.
// Affects only PingJob's pods.
func WithNginxPingJobLabels(labels map[string]string) NginxOpts {
return func(n *Nginx) error {
for k, v := range labels {
n.pingPodSelector.MatchLabels[k] = v
}
return nil
}
}
// IsReachable pings the nginx service.
// Expects the nginx service to be reachable.
func (n *Nginx) IsReachable() error {
if err := n.newPingPod(true); err != nil {
return fmt.Errorf("error svc wasn't reachable: %v", err)
}
return nil
}
// IsUnReachable pings the nginx service.
// Expects the nginx service to be unreachable.
func (n *Nginx) IsUnReachable() error {
if err := n.newPingPod(false); err != nil {
return fmt.Errorf("error svc was reachable: %v", err)
}
return nil
}
// Delete deletes the deployment and service
func (n *Nginx) Delete() error {
delPropPolicy := metav1.DeletePropagationForeground
if err := wait.PollImmediate(PollIntervalForNginx, PollTimeoutForNginx, func() (bool, error) {
if err := n.client.ExtensionsV1beta1().Deployments(n.Namespace).Delete(n.Name, &metav1.DeleteOptions{
PropagationPolicy: &delPropPolicy,
}); err != nil && !apierrors.IsNotFound(err) {
return false, nil
}
if err := n.client.CoreV1().Services(n.Namespace).Delete(n.Name, &metav1.DeleteOptions{
PropagationPolicy: &delPropPolicy,
}); err != nil && !apierrors.IsNotFound(err) {
return false, nil
}
return true, nil
}); err != nil {
return fmt.Errorf("error deleting %s deployment and serivce: %v", n.Name, err)
}
return nil
}
func (n *Nginx) newNginxDeployment() error {
var (
repl int32 = 2
cPort int32 = 80
)
d := &extensionsv1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: n.Name,
Namespace: n.Namespace,
},
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: &repl,
Selector: n.podSelector,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: n.podSelector.MatchLabels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx:1.12-alpine",
Ports: []v1.ContainerPort{
{
ContainerPort: cPort,
},
},
},
},
NodeSelector: n.nodeSelector.MatchLabels,
},
},
},
}
if _, err := n.client.ExtensionsV1beta1().Deployments(n.Namespace).Create(d); err != nil {
return err
}
return nil
}
func (n *Nginx) newNginxService() error {
var (
cPort int32 = 80
tPort int32 = 80
)
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: n.Name,
Namespace: n.Namespace,
},
Spec: v1.ServiceSpec{
Selector: n.podSelector.MatchLabels,
Ports: []v1.ServicePort{
{
Protocol: v1.ProtocolTCP,
Port: cPort,
TargetPort: intstr.FromInt(int(tPort)),
},
},
},
}
if _, err := n.client.CoreV1().Services(n.Namespace).Create(svc); err != nil {
return err
}
return nil
}
func (n *Nginx) newPingPod(reachable bool) error {
name := fmt.Sprintf("%s-ping-job-%s", n.Name, utilrand.String(5))
deadline := int64(PollTimeoutForNginx.Seconds())
cmd := fmt.Sprintf("wget --timeout 5 %s", n.Name)
if !reachable {
cmd = fmt.Sprintf("! %s", cmd)
}
runcmd := []string{"/bin/sh", "-c", cmd}
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: n.Namespace,
},
Spec: batchv1.JobSpec{
ActiveDeadlineSeconds: &deadline,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: n.pingPodSelector.MatchLabels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "ping-container",
Image: "alpine:3.6",
Command: runcmd,
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
},
},
}
if _, err := n.client.BatchV1().Jobs(n.Namespace).Create(job); err != nil {
return err
}
// wait for pod state
if err := wait.PollImmediate(PollIntervalForNginx, PollTimeoutForNginx, func() (bool, error) {
j, err := n.client.BatchV1().Jobs(n.Namespace).Get(job.GetName(), metav1.GetOptions{})
if err != nil {
glog.Errorf("Error getting job %s: %v", job.GetName(), err)
return false, nil
}
if j.Status.Succeeded < 1 {
return false, nil
}
return true, nil
}); err != nil {
return fmt.Errorf("ping job didn't succeed: %v", err)
}
return nil
}


@@ -0,0 +1,128 @@
Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Vinson <avinson.public@gmail.com>
Adam Enger <adamenger@gmail.com>
Adrian Mouat <adrian.mouat@gmail.com>
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
Alex Chan <alex.chan@metaswitch.com>
Alex Elman <aelman@indeed.com>
amitshukla <ashukla73@hotmail.com>
Amy Lindburg <amy.lindburg@docker.com>
Andrew Meredith <andymeredith@gmail.com>
Andrew T Nguyen <andrew.nguyen@docker.com>
Andrey Kostov <kostov.andrey@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Antonio Murdaca <runcom@redhat.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
Asuka Suzuki <hello@tanksuzuki.com>
Avi Miller <avi.miller@oracle.com>
Ayose Cazorla <ayosec@gmail.com>
BadZen <dave.trombley@gmail.com>
Ben Firshman <ben@firshman.co.uk>
bin liu <liubin0329@gmail.com>
Brian Bland <brian.bland@docker.com>
burnettk <burnettk@gmail.com>
Carson A <ca@carsonoid.net>
Chris Dillon <squarism@gmail.com>
Daisuke Fujita <dtanshi45@gmail.com>
Darren Shepherd <darren@rancher.com>
Dave Trombley <dave.trombley@gmail.com>
Dave Tucker <dt@docker.com>
David Lawrence <david.lawrence@docker.com>
David Verhasselt <david@crowdway.com>
David Xia <dxia@spotify.com>
davidli <wenquan.li@hp.com>
Dejan Golja <dejan@golja.org>
Derek McGowan <derek@mcgstyle.net>
Diogo Mónica <diogo.monica@gmail.com>
DJ Enriquez <dj.enriquez@infospace.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
Eric Yang <windfarer@gmail.com>
farmerworking <farmerworking@gmail.com>
Felix Yan <felixonmars@archlinux.org>
Florentin Raud <florentin.raud@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
gabriell nascimento <gabriell@bluesoft.com.br>
harche <p.harshal@gmail.com>
Henri Gomez <henri.gomez@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
HuKeping <hukeping@huawei.com>
Ian Babrou <ibobrik@gmail.com>
igayoso <igayoso@gmail.com>
Jack Griffin <jackpg14@gmail.com>
Jason Freidman <jason.freidman@gmail.com>
Jeff Nickoloff <jeff@allingeek.com>
Jessie Frazelle <jessie@docker.com>
Jianqing Wang <tsing@jianqing.org>
John Starks <jostarks@microsoft.com>
Jon Poler <jonathan.poler@apcera.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jordan Liggitt <jliggitt@redhat.com>
Josh Hawn <josh.hawn@docker.com>
Julien Fernandez <julien.fernandez@gmail.com>
Keerthan Mala <kmala@engineyard.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Kenny Leung <kleung@google.com>
Li Yi <denverdino@gmail.com>
Liu Hua <sdu.liu@huawei.com>
liuchang0812 <liuchang0812@gmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luke Carpenter <x@rubynerd.net>
Mary Anthony <mary@docker.com>
Matt Bentley <mbentley@mbentley.net>
Matt Duch <matt@learnmetrics.com>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Michael Prokop <mika@grml.org>
Michal Minar <miminar@redhat.com>
Miquel Sabaté <msabate@suse.com>
Morgan Bauer <mbauer@us.ibm.com>
moxiegirl <mary@docker.com>
Nathan Sullivan <nathan@nightsys.net>
nevermosby <robolwq@qq.com>
Nghia Tran <tcnghia@gmail.com>
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
Oilbeater <liumengxinfly@gmail.com>
Olivier Gambier <olivier@docker.com>
Olivier Jacques <olivier.jacques@hp.com>
Omer Cohen <git@omer.io>
Patrick Devine <patrick.devine@docker.com>
Philip Misiowiec <philip@atlashealth.com>
Richard Scothern <richard.scothern@docker.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Rusty Conover <rusty@luckydinosaur.com>
Sean Boran <Boran@users.noreply.github.com>
Sebastiaan van Stijn <github@gone.nl>
Sharif Nassar <sharif@mrwacky.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
Shreyas Karnik <karnik.shreyas@gmail.com>
Simon Thulbourn <simon+github@thulbourn.com>
Spencer Rinehart <anubis@overthemonkey.com>
Stefan Weil <sw@weilnetz.de>
Stephen J Day <stephen.day@docker.com>
Sungho Moon <sungho.moon@navercorp.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
Ted Reed <ted.reed@gmail.com>
tgic <farmer1992@gmail.com>
Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Trevor Pounds <trevor.pounds@gmail.com>
Troels Thomsen <troels@thomsen.io>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent@sbr.pm>
Vincent Giersch <vincent.giersch@ovh.net>
W. Trevor King <wking@tremily.us>
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Yann ROBERT <yann.robert@anantaplex.fr>
yuzou <zouyu7@huawei.com>
姜继忠 <jizhong.jiangjz@alibaba-inc.com>


@@ -1,237 +0,0 @@
package distribution
import (
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
)
var (
// ErrBlobExists returned when blob already exists
ErrBlobExists = errors.New("blob exists")
// ErrBlobDigestUnsupported when blob digest is an unsupported version.
ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
// ErrBlobUnknown when blob is not found.
ErrBlobUnknown = errors.New("unknown blob")
// ErrBlobUploadUnknown returned when upload is not found.
ErrBlobUploadUnknown = errors.New("blob upload unknown")
// ErrBlobInvalidLength returned when the blob has an expected length on
// commit, meaning mismatched with the descriptor or an invalid value.
ErrBlobInvalidLength = errors.New("blob invalid length")
)
// ErrBlobInvalidDigest returned when digest check fails.
type ErrBlobInvalidDigest struct {
Digest digest.Digest
Reason error
}
func (err ErrBlobInvalidDigest) Error() string {
return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
err.Digest, err.Reason)
}
// ErrBlobMounted returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
From reference.Canonical
Descriptor Descriptor
}
func (err ErrBlobMounted) Error() string {
return fmt.Sprintf("blob mounted from: %v to: %v",
err.From, err.Descriptor)
}
// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
// MediaType describe the type of the content. All text based formats are
// encoded as utf-8.
MediaType string `json:"mediaType,omitempty"`
// Size in bytes of content.
Size int64 `json:"size,omitempty"`
// Digest uniquely identifies the content. A byte stream can be verified
// against this digest.
Digest digest.Digest `json:"digest,omitempty"`
// NOTE: Before adding a field here, please ensure that all
// other options have been exhausted. Much of the type relationships
// depend on the simplicity of this type.
}
// Descriptor returns the descriptor, to make it satisfy the Describable
// interface. Note that implementations of Describable are generally objects
// which can be described, not simply descriptors; this exception is in place
// to make it more convenient to pass actual descriptors to functions that
// expect Describable objects.
func (d Descriptor) Descriptor() Descriptor {
return d
}
// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
// Stat provides metadata about a blob identified by the digest. If the
// blob is unknown to the describer, ErrBlobUnknown will be returned.
Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}
// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
Delete(ctx context.Context, dgst digest.Digest) error
}
// BlobEnumerator enables iterating over blobs from storage
type BlobEnumerator interface {
Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}
// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
BlobStatter
// SetDescriptor assigns the descriptor to the digest. The provided digest and
// the digest in the descriptor must map to identical content but they may
// differ on their algorithm. The descriptor must have the canonical
// digest of the content and the digest algorithm must match the
// annotators canonical algorithm.
//
// Such a facility can be used to map blobs between digest domains, with
// the restriction that the algorithm of the descriptor must match the
// canonical algorithm (ie sha256) of the annotator.
SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
// Clear enables descriptors to be unlinked
Clear(ctx context.Context, dgst digest.Digest) error
}
// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
io.ReadSeeker
io.Closer
}
// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
// Get returns the entire blob identified by digest along with the descriptor.
Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
// Open provides a ReadSeekCloser to the blob identified by the provided
// descriptor. If the blob is not known to the service, an error will be
// returned.
Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
}
// BlobServer can serve blobs via http.
type BlobServer interface {
// ServeBlob attempts to serve the blob, identified by dgst, via http. The
// service may decide to redirect the client elsewhere or serve the data
// directly.
//
// This handler only issues successful responses, such as 2xx or 3xx,
// meaning it serves data or issues a redirect. If the blob is not
// available, an error will be returned and the caller may still issue a
// response.
//
// The implementation may serve the same blob from a different digest
// domain. The appropriate headers will be set for the blob, unless they
// have already been set by the caller.
ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}
// BlobIngester ingests blob data.
type BlobIngester interface {
// Put inserts the content p into the blob service, returning a descriptor
// or an error.
Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
// Create allocates a new blob writer to add a blob to this service. The
// returned handle can be written to and later resumed using an opaque
// identifier. With this approach, one can Close and Resume a BlobWriter
// multiple times until the BlobWriter is committed or cancelled.
Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
// Resume attempts to resume a write to a blob, identified by an id.
Resume(ctx context.Context, id string) (BlobWriter, error)
}
// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
Apply(interface{}) error
}
// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
io.WriteCloser
io.ReaderFrom
// Size returns the number of bytes written to this blob.
Size() int64
// ID returns the identifier for this writer. The ID can be used with the
// Blob service to later resume the write.
ID() string
// StartedAt returns the time this blob write was started.
StartedAt() time.Time
// Commit completes the blob writer process. The content is verified
// against the provided provisional descriptor, which may result in an
// error. Depending on the implementation, written data may be validated
// against the provisional descriptor fields. If MediaType is not present,
// the implementation may reject the commit or assign "application/octet-
// stream" to the blob. The returned descriptor may have a different
// digest depending on the blob store, referred to as the canonical
// descriptor.
Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
// Cancel ends the blob write without storing any data and frees any
// associated resources. Any data written thus far will be lost. Cancel
// implementations should allow multiple calls even after a commit that
// result in a no-op. This allows use of Cancel in a defer statement,
// increasing the assurance that it is correctly called.
Cancel(ctx context.Context) error
}
// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
BlobStatter
BlobProvider
BlobIngester
}
// BlobStore represent the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
BlobService
BlobServer
BlobDeleter
}
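The interfaces above compose into the usual upload flow: Create a BlobWriter, stream data into it, then Commit against a provisional descriptor. A minimal sketch, assuming the vendored github.com/docker/distribution import paths; the helper and package names are illustrative and not part of this diff.

```go
package blobsketch

import (
	"bytes"
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// uploadBlob is a hypothetical helper: it writes p through a BlobWriter and
// verifies it on Commit against the digest of the content.
func uploadBlob(ctx context.Context, blobs distribution.BlobIngester, p []byte) (distribution.Descriptor, error) {
	bw, err := blobs.Create(ctx) // allocate a resumable writer
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if _, err := io.Copy(bw, bytes.NewReader(p)); err != nil {
		bw.Cancel(ctx) // discard partial data and free resources
		return distribution.Descriptor{}, err
	}
	// Commit validates the written data against the provisional descriptor
	// and returns the canonical descriptor chosen by the store.
	return bw.Commit(ctx, distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(p)),
		Digest:    digest.FromBytes(p),
	})
}
```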


@@ -1,7 +0,0 @@
// Package distribution will define the interfaces for the components of
// docker distribution. The goal is to allow users to reliably package, ship
// and store content related to docker images.
//
// This is currently a work in progress. More details are available in the
// README.md.
package distribution


@@ -1,115 +0,0 @@
package distribution
import (
"errors"
"fmt"
"strings"
"github.com/docker/distribution/digest"
)
// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")
// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version
var ErrManifestNotModified = errors.New("manifest not modified")
// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed
var ErrUnsupported = errors.New("operation unsupported")
// ErrTagUnknown is returned if the given tag is not known by the tag service
type ErrTagUnknown struct {
Tag string
}
func (err ErrTagUnknown) Error() string {
return fmt.Sprintf("unknown tag=%s", err.Tag)
}
// ErrRepositoryUnknown is returned if the named repository is not known by
// the registry.
type ErrRepositoryUnknown struct {
Name string
}
func (err ErrRepositoryUnknown) Error() string {
return fmt.Sprintf("unknown repository name=%s", err.Name)
}
// ErrRepositoryNameInvalid should be used to denote an invalid repository
// name. Reason may be set, indicating the cause of invalidity.
type ErrRepositoryNameInvalid struct {
Name string
Reason error
}
func (err ErrRepositoryNameInvalid) Error() string {
return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
}
// ErrManifestUnknown is returned if the manifest is not known by the
// registry.
type ErrManifestUnknown struct {
Name string
Tag string
}
func (err ErrManifestUnknown) Error() string {
return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}
// ErrManifestUnknownRevision is returned when a manifest cannot be found by
// revision within a repository.
type ErrManifestUnknownRevision struct {
Name string
Revision digest.Digest
}
func (err ErrManifestUnknownRevision) Error() string {
return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}
// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}
func (ErrManifestUnverified) Error() string {
return fmt.Sprintf("unverified manifest")
}
// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error
func (errs ErrManifestVerification) Error() string {
var parts []string
for _, err := range errs {
parts = append(parts, err.Error())
}
return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}
// ErrManifestBlobUnknown returned when a referenced blob cannot be found.
type ErrManifestBlobUnknown struct {
Digest digest.Digest
}
func (err ErrManifestBlobUnknown) Error() string {
return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may be set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
Name string
Reason error
}
func (err ErrManifestNameInvalid) Error() string {
return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}
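Callers typically branch on these typed errors with a type switch. A small sketch under the same assumptions as above; the helper name is hypothetical.

```go
package distsketch

import "github.com/docker/distribution"

// describeErr maps the distribution error types above to human-readable text.
func describeErr(err error) string {
	switch e := err.(type) {
	case distribution.ErrRepositoryUnknown:
		return "no such repository: " + e.Name
	case distribution.ErrManifestUnknownRevision:
		return "no such manifest: " + e.Name + "@" + e.Revision.String()
	case distribution.ErrManifestBlobUnknown:
		return "manifest references unknown blob " + e.Digest.String()
	default:
		if err == distribution.ErrAccessDenied {
			return "access denied"
		}
		return err.Error()
	}
}
```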


@@ -1,123 +0,0 @@
package distribution
import (
"fmt"
"mime"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
)
// Manifest represents a registry object specifying a set of
// references and an optional target
type Manifest interface {
// References returns a list of objects which make up this manifest.
// The references are strictly ordered from base to head. A reference
// is anything which can be represented by a distribution.Descriptor
References() []Descriptor
// Payload provides the serialized format of the manifest, in addition to
// the mediatype.
Payload() (mediatype string, payload []byte, err error)
}
// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package. Manifest
// specific data is passed into the function which creates the builder.
type ManifestBuilder interface {
// Build creates the manifest from this builder.
Build(ctx context.Context) (Manifest, error)
// References returns a list of objects which have been added to this
// builder. The dependencies are returned in the order they were added,
// which should be from base to head.
References() []Descriptor
// AppendReference includes the given object in the manifest after any
// existing dependencies. If the add fails, such as when adding an
// unsupported dependency, an error may be returned.
AppendReference(dependency Describable) error
}
// ManifestService describes operations on image manifests.
type ManifestService interface {
// Exists returns true if the manifest exists.
Exists(ctx context.Context, dgst digest.Digest) (bool, error)
// Get retrieves the manifest specified by the given digest
Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
// Put creates or updates the given manifest returning the manifest digest
Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
// Delete removes the manifest specified by the given digest. Deleting
// a manifest that doesn't exist will return ErrManifestNotFound
Delete(ctx context.Context, dgst digest.Digest) error
}
// ManifestEnumerator enables iterating over manifests
type ManifestEnumerator interface {
// Enumerate calls ingester for each manifest.
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}
// SignaturesGetter provides an interface for getting the signatures of a schema1 manifest. If the digest
// referred to is not a schema1 manifest, an error should be returned.
type SignaturesGetter interface {
GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error)
}
// Describable is an interface for descriptors
type Describable interface {
Descriptor() Descriptor
}
// ManifestMediaTypes returns the supported media types for manifests.
func ManifestMediaTypes() (mediaTypes []string) {
for t := range mappings {
if t != "" {
mediaTypes = append(mediaTypes, t)
}
}
return
}
// UnmarshalFunc implements manifest unmarshalling for a given MediaType
type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
var mappings = make(map[string]UnmarshalFunc, 0)
// UnmarshalManifest looks up manifest unmarshal functions based on
// MediaType
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
// Need to look up by the actual media type, not the raw contents of
// the header. Strip semicolons and anything following them.
var mediatype string
if ctHeader != "" {
var err error
mediatype, _, err = mime.ParseMediaType(ctHeader)
if err != nil {
return nil, Descriptor{}, err
}
}
unmarshalFunc, ok := mappings[mediatype]
if !ok {
unmarshalFunc, ok = mappings[""]
if !ok {
return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype)
}
}
return unmarshalFunc(p)
}
// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
// should be called from specific manifest packages.
func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error {
if _, ok := mappings[mediatype]; ok {
return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype)
}
mappings[mediatype] = u
return nil
}
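To make the media-type registry concrete: schema packages register an UnmarshalFunc from their init functions, and a client decodes a fetched manifest by its Content-Type header. A hedged sketch; the helper name and the blank import of the schema2 package are assumptions, not part of this diff.

```go
package manifestsketch

import (
	"io/ioutil"
	"net/http"

	"github.com/docker/distribution"
	// Imported for its side effect: the package registers its UnmarshalFunc
	// via RegisterManifestSchema in init().
	_ "github.com/docker/distribution/manifest/schema2"
)

// decodeManifest turns a registry response into a Manifest plus its descriptor.
func decodeManifest(resp *http.Response) (distribution.Manifest, distribution.Descriptor, error) {
	p, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, distribution.Descriptor{}, err
	}
	// UnmarshalManifest strips media-type parameters (e.g. "; charset=utf-8")
	// before looking up the registered unmarshal function.
	return distribution.UnmarshalManifest(resp.Header.Get("Content-Type"), p)
}
```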


@@ -1,97 +0,0 @@
package distribution
import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
)
// Scope defines the set of items that match a namespace.
type Scope interface {
// Contains returns true if the name belongs to the namespace.
Contains(name string) bool
}
type fullScope struct{}
func (f fullScope) Contains(string) bool {
return true
}
// GlobalScope represents the full namespace scope which contains
// all other scopes.
var GlobalScope = Scope(fullScope{})
// Namespace represents a collection of repositories, addressable by name.
// Generally, a namespace is backed by a set of one or more services,
// providing facilities such as registry access, trust, and indexing.
type Namespace interface {
// Scope describes the names that can be used with this Namespace. The
// global namespace will have a scope that matches all names. The scope
// effectively provides an identity for the namespace.
Scope() Scope
// Repository should return a reference to the named repository. The
// registry may or may not have the repository but should always return a
// reference.
Repository(ctx context.Context, name reference.Named) (Repository, error)
// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
// up to the size of 'repos' and returns the value 'n' for the number of entries
// which were filled. 'last' contains an offset in the catalog, and 'err' will be
// set to io.EOF if there are no more entries to obtain.
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
// Blobs returns a blob enumerator to access all blobs
Blobs() BlobEnumerator
// BlobStatter returns a BlobStatter to control
BlobStatter() BlobStatter
}
// RepositoryEnumerator describes an operation to enumerate repositories
type RepositoryEnumerator interface {
Enumerate(ctx context.Context, ingester func(string) error) error
}
// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption interface {
Apply(ManifestService) error
}
// WithTag allows a tag to be passed into Put
func WithTag(tag string) ManifestServiceOption {
return WithTagOption{tag}
}
// WithTagOption holds a tag
type WithTagOption struct{ Tag string }
// Apply conforms to the ManifestServiceOption interface
func (o WithTagOption) Apply(m ManifestService) error {
// no implementation
return nil
}
// Repository is a named collection of manifests and layers.
type Repository interface {
// Named returns the name of the repository.
Named() reference.Named
// Manifests returns a reference to this repository's manifest service
// with the supplied options applied.
Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
// Blobs returns a reference to this repository's blob service.
Blobs(ctx context.Context) BlobStore
// TODO(stevvooe): The above BlobStore return can probably be relaxed to
// be a BlobService for use with clients. This will allow such
// implementations to avoid implementing ServeBlob.
// Tags returns a reference to this repository's tag service
Tags(ctx context.Context) TagService
}
// TODO(stevvooe): Must add close methods to all these. May want to change the
// way instances are created to better reflect internal dependency
// relationships.
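The Repositories paging contract (fill a caller-supplied slice, pass the last name back in, stop on io.EOF) is typically consumed like this; a sketch with a hypothetical helper name.

```go
package catalogsketch

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// allRepositories walks the catalog a page at a time.
func allRepositories(ctx context.Context, ns distribution.Namespace) ([]string, error) {
	var (
		all  []string
		last string
	)
	page := make([]string, 100)
	for {
		n, err := ns.Repositories(ctx, page, last)
		all = append(all, page[:n]...)
		switch {
		case err == io.EOF:
			return all, nil // no more entries
		case err != nil:
			return nil, err
		case n == 0:
			return all, nil // defensive: nothing filled and no EOF
		}
		last = page[n-1] // resume after the last name we saw
	}
}
```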


@@ -1,27 +0,0 @@
package distribution
import (
"github.com/docker/distribution/context"
)
// TagService provides access to information about tagged objects.
type TagService interface {
// Get retrieves the descriptor identified by the tag. Some
// implementations may differentiate between "trusted" tags and
// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
// as an ErrTagUntrusted error, with the target descriptor.
Get(ctx context.Context, tag string) (Descriptor, error)
// Tag associates the tag with the provided descriptor, updating the
// current association, if needed.
Tag(ctx context.Context, tag string, desc Descriptor) error
// Untag removes the given tag association
Untag(ctx context.Context, tag string) error
// All returns the set of tags managed by this tag service
All(ctx context.Context) ([]string, error)
// Lookup returns the set of tags referencing the given digest.
Lookup(ctx context.Context, digest Descriptor) ([]string, error)
}


@@ -1,141 +0,0 @@
package client
import (
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/docker/engine-api/client/transport"
"github.com/docker/go-connections/tlsconfig"
)
// Client is the API client that performs all operations
// against a docker server.
type Client struct {
// proto holds the client protocol i.e. unix.
proto string
// addr holds the client address.
addr string
// basePath holds the path to prepend to the requests.
basePath string
// transport is the interface to send request with, it implements transport.Client.
transport transport.Client
// version of the server to talk to.
version string
// custom http headers configured by users.
customHTTPHeaders map[string]string
}
// NewEnvClient initializes a new API client based on environment variables.
// Use DOCKER_HOST to set the URL of the docker server.
// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
// Use DOCKER_CERT_PATH to load the TLS certificates from.
// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
func NewEnvClient() (*Client, error) {
var client *http.Client
if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
options := tlsconfig.Options{
CAFile: filepath.Join(dockerCertPath, "ca.pem"),
CertFile: filepath.Join(dockerCertPath, "cert.pem"),
KeyFile: filepath.Join(dockerCertPath, "key.pem"),
InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
client = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
host := os.Getenv("DOCKER_HOST")
if host == "" {
host = DefaultDockerHost
}
return NewClient(host, os.Getenv("DOCKER_API_VERSION"), client, nil)
}
// NewClient initializes a new API client for the given host and API version.
// It won't send any version information if the version number is empty.
// It uses the given http client as transport.
// It also initializes the custom http headers to add to each request.
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
proto, addr, basePath, err := ParseHost(host)
if err != nil {
return nil, err
}
transport, err := transport.NewTransportWithHTTP(proto, addr, client)
if err != nil {
return nil, err
}
return &Client{
proto: proto,
addr: addr,
basePath: basePath,
transport: transport,
version: version,
customHTTPHeaders: httpHeaders,
}, nil
}
// getAPIPath returns the versioned request path to call the api.
// It appends the query parameters to the path if they are not empty.
func (cli *Client) getAPIPath(p string, query url.Values) string {
var apiPath string
if cli.version != "" {
v := strings.TrimPrefix(cli.version, "v")
apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
} else {
apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
}
u := &url.URL{
Path: apiPath,
}
if len(query) > 0 {
u.RawQuery = query.Encode()
}
return u.String()
}
// ClientVersion returns the version string associated with this
// instance of the Client. Note that this value can be changed
// via the DOCKER_API_VERSION env var.
func (cli *Client) ClientVersion() string {
return cli.version
}
// UpdateClientVersion updates the version string associated with this
// instance of the Client.
func (cli *Client) UpdateClientVersion(v string) {
cli.version = v
}
// ParseHost verifies that the given host string is valid.
func ParseHost(host string) (string, string, string, error) {
protoAddrParts := strings.SplitN(host, "://", 2)
if len(protoAddrParts) == 1 {
return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
}
var basePath string
proto, addr := protoAddrParts[0], protoAddrParts[1]
if proto == "tcp" {
parsed, err := url.Parse("tcp://" + addr)
if err != nil {
return "", "", "", err
}
addr = parsed.Host
basePath = parsed.Path
}
return proto, addr, basePath, nil
}
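A usage sketch for this client: construct it from the environment and make a simple API call (ContainerList appears later in this package). The program below is illustrative, not part of the vendored code.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	// NewEnvClient honours DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH
	// and DOCKER_TLS_VERIFY, as described above.
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Image)
	}
}
```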


@@ -1,4 +0,0 @@
package client
// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
const DefaultDockerHost = "tcp://127.0.0.1:2375"


@@ -1,6 +0,0 @@
// +build linux freebsd solaris openbsd

package client
// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
const DefaultDockerHost = "unix:///var/run/docker.sock"


@@ -1,4 +0,0 @@
package client
// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
const DefaultDockerHost = "npipe:////./pipe/docker_engine"


@@ -1,34 +0,0 @@
package client
import (
"net/url"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ContainerAttach attaches a connection to a container in the server.
// It returns a types.HijackedResponse with the hijacked connection
// and a reader to get output. It's up to the caller to close
// the hijacked connection by calling types.HijackedResponse.Close.
func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
query := url.Values{}
if options.Stream {
query.Set("stream", "1")
}
if options.Stdin {
query.Set("stdin", "1")
}
if options.Stdout {
query.Set("stdout", "1")
}
if options.Stderr {
query.Set("stderr", "1")
}
if options.DetachKeys != "" {
query.Set("detachKeys", options.DetachKeys)
}
headers := map[string][]string{"Content-Type": {"text/plain"}}
return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
}


@@ -1,53 +0,0 @@
package client
import (
"encoding/json"
"errors"
"net/url"
distreference "github.com/docker/distribution/reference"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/reference"
"golang.org/x/net/context"
)
// ContainerCommit applies changes into a container and creates a new tagged image.
func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) {
var repository, tag string
if options.Reference != "" {
distributionRef, err := distreference.ParseNamed(options.Reference)
if err != nil {
return types.ContainerCommitResponse{}, err
}
if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference")
}
tag = reference.GetTagFromNamedRef(distributionRef)
repository = distributionRef.Name()
}
query := url.Values{}
query.Set("container", container)
query.Set("repo", repository)
query.Set("tag", tag)
query.Set("comment", options.Comment)
query.Set("author", options.Author)
for _, change := range options.Changes {
query.Add("changes", change)
}
if options.Pause != true {
query.Set("pause", "0")
}
var response types.ContainerCommitResponse
resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}


@@ -1,97 +0,0 @@
package client
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path/filepath"
"strings"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ContainerStatPath returns Stat information about a path inside the container filesystem.
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
query := url.Values{}
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
urlStr := fmt.Sprintf("/containers/%s/archive", containerID)
response, err := cli.head(ctx, urlStr, query, nil)
if err != nil {
return types.ContainerPathStat{}, err
}
defer ensureReaderClosed(response)
return getContainerPathStatFromHeader(response.header)
}
// CopyToContainer copies content into the container filesystem.
func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error {
query := url.Values{}
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
if !options.AllowOverwriteDirWithFile {
query.Set("noOverwriteDirNonDir", "true")
}
apiPath := fmt.Sprintf("/containers/%s/archive", container)
response, err := cli.putRaw(ctx, apiPath, query, content, nil)
if err != nil {
return err
}
defer ensureReaderClosed(response)
if response.statusCode != http.StatusOK {
return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
return nil
}
// CopyFromContainer gets the content from the container and returns it as a Reader
// to manipulate it in the host. It's up to the caller to close the reader.
func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
query := make(url.Values, 1)
query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
apiPath := fmt.Sprintf("/containers/%s/archive", container)
response, err := cli.get(ctx, apiPath, query, nil)
if err != nil {
return nil, types.ContainerPathStat{}, err
}
if response.statusCode != http.StatusOK {
return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
// In order to get the copy behavior right, we need to know information
// about both the source and the destination. The response headers include
// stat info about the source that we can use in deciding exactly how to
// copy it locally. Along with the stat info about the local destination,
// we have everything we need to handle the multiple possibilities there
// can be when copying a file/dir from one location to another file/dir.
stat, err := getContainerPathStatFromHeader(response.header)
if err != nil {
return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
}
return response.body, stat, err
}
func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
var stat types.ContainerPathStat
encodedStat := header.Get("X-Docker-Container-Path-Stat")
statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
err := json.NewDecoder(statDecoder).Decode(&stat)
if err != nil {
err = fmt.Errorf("unable to decode container path stat header: %s", err)
}
return stat, err
}
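Because CopyFromContainer returns a tar stream, callers normally unpack it with archive/tar. A minimal sketch; the helper name is hypothetical.

```go
package copysketch

import (
	"archive/tar"
	"io/ioutil"

	"github.com/docker/engine-api/client"
	"golang.org/x/net/context"
)

// readFileFromContainer fetches a single file from the container and returns its bytes.
func readFileFromContainer(ctx context.Context, cli *client.Client, containerID, path string) ([]byte, error) {
	rc, stat, err := cli.CopyFromContainer(ctx, containerID, path)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	_ = stat // ContainerPathStat carries name, size, mode and mtime of the source

	tr := tar.NewReader(rc)
	if _, err := tr.Next(); err != nil { // the archive holds the requested path
		return nil, err
	}
	return ioutil.ReadAll(tr)
}
```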


@@ -1,46 +0,0 @@
package client
import (
"encoding/json"
"net/url"
"strings"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/network"
"golang.org/x/net/context"
)
type configWrapper struct {
*container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
}
// ContainerCreate creates a new container based on the given configuration.
// It can be associated with a name, but it's not mandatory.
func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) {
var response types.ContainerCreateResponse
query := url.Values{}
if containerName != "" {
query.Set("name", containerName)
}
body := configWrapper{
Config: config,
HostConfig: hostConfig,
NetworkingConfig: networkingConfig,
}
serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
if err != nil {
if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
return response, imageNotFoundError{config.Image}
}
return response, err
}
err = json.NewDecoder(serverResp.body).Decode(&response)
ensureReaderClosed(serverResp)
return response, err
}


@@ -1,23 +0,0 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ContainerDiff shows differences in a container filesystem since it was started.
func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) {
var changes []types.ContainerChange
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
if err != nil {
return changes, err
}
err = json.NewDecoder(serverResp.body).Decode(&changes)
ensureReaderClosed(serverResp)
return changes, err
}


@@ -1,49 +0,0 @@
package client
import (
"encoding/json"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ContainerExecCreate creates a new exec configuration to run an exec process.
func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) {
var response types.ContainerExecCreateResponse
resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}
// ContainerExecStart starts an exec process already created in the docker host.
func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
ensureReaderClosed(resp)
return err
}
// ContainerExecAttach attaches a connection to an exec process in the server.
// It returns a types.HijackedResponse with the hijacked connection
// and a reader to get output. It's up to the caller to close
// the hijacked connection by calling types.HijackedResponse.Close.
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
headers := map[string][]string{"Content-Type": {"application/json"}}
return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
}
// ContainerExecInspect returns information about a specific exec process on the docker host.
func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
var response types.ContainerExecInspect
resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}


@@ -1,20 +0,0 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
)
// ContainerExport retrieves the raw contents of a container
// and returns them as an io.ReadCloser. It's up to the caller
// to close the stream.
func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
if err != nil {
return nil, err
}
return serverResp.body, nil
}


@@ -1,65 +0,0 @@
package client
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ContainerInspect returns the container information.
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
if err != nil {
if serverResp.statusCode == http.StatusNotFound {
return types.ContainerJSON{}, containerNotFoundError{containerID}
}
return types.ContainerJSON{}, err
}
var response types.ContainerJSON
err = json.NewDecoder(serverResp.body).Decode(&response)
ensureReaderClosed(serverResp)
return response, err
}
// ContainerInspectWithRaw returns the container information and its raw representation.
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
query := url.Values{}
if getSize {
query.Set("size", "1")
}
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
if err != nil {
if serverResp.statusCode == http.StatusNotFound {
return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
}
return types.ContainerJSON{}, nil, err
}
defer ensureReaderClosed(serverResp)
body, err := ioutil.ReadAll(serverResp.body)
if err != nil {
return types.ContainerJSON{}, nil, err
}
var response types.ContainerJSON
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&response)
return response, body, err
}
func (cli *Client) containerInspectWithResponse(ctx context.Context, containerID string, query url.Values) (types.ContainerJSON, *serverResponse, error) {
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
if err != nil {
return types.ContainerJSON{}, serverResp, err
}
var response types.ContainerJSON
err = json.NewDecoder(serverResp.body).Decode(&response)
return response, serverResp, err
}


@@ -1,17 +0,0 @@
package client
import (
"net/url"
"golang.org/x/net/context"
)
// ContainerKill terminates the container process but does not remove the container from the docker host.
func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
query := url.Values{}
query.Set("signal", signal)
resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,56 +0,0 @@
package client
import (
"encoding/json"
"net/url"
"strconv"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"golang.org/x/net/context"
)
// ContainerList returns the list of containers in the docker host.
func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
query := url.Values{}
if options.All {
query.Set("all", "1")
}
if options.Limit != -1 {
query.Set("limit", strconv.Itoa(options.Limit))
}
if options.Since != "" {
query.Set("since", options.Since)
}
if options.Before != "" {
query.Set("before", options.Before)
}
if options.Size {
query.Set("size", "1")
}
if options.Filter.Len() > 0 {
filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter)
if err != nil {
return nil, err
}
query.Set("filters", filterJSON)
}
resp, err := cli.get(ctx, "/containers/json", query, nil)
if err != nil {
return nil, err
}
var containers []types.Container
err = json.NewDecoder(resp.body).Decode(&containers)
ensureReaderClosed(resp)
return containers, err
}


@@ -1,52 +0,0 @@
package client
import (
"io"
"net/url"
"time"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
timetypes "github.com/docker/engine-api/types/time"
)
// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
// It's up to the caller to close the stream.
func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
query := url.Values{}
if options.ShowStdout {
query.Set("stdout", "1")
}
if options.ShowStderr {
query.Set("stderr", "1")
}
if options.Since != "" {
ts, err := timetypes.GetTimestamp(options.Since, time.Now())
if err != nil {
return nil, err
}
query.Set("since", ts)
}
if options.Timestamps {
query.Set("timestamps", "1")
}
if options.Details {
query.Set("details", "1")
}
if options.Follow {
query.Set("follow", "1")
}
query.Set("tail", options.Tail)
resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
if err != nil {
return nil, err
}
return resp.body, nil
}
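For example, tailing and then following a container's output; a sketch with an illustrative helper name. Note that stdout and stderr arrive multiplexed in one stream for non-TTY containers.

```go
package logsketch

import (
	"io"
	"os"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

// streamLogs copies the last 100 log lines, then follows new output until ctx ends
// or the stream is closed.
func streamLogs(ctx context.Context, cli *client.Client, containerID string) error {
	rc, err := cli.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       "100",
		Follow:     true,
		Timestamps: true,
	})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(os.Stdout, rc)
	return err
}
```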


@@ -1,10 +0,0 @@
package client
import "golang.org/x/net/context"
// ContainerPause pauses the main process of a given container without terminating it.
func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,27 +0,0 @@
package client
import (
"net/url"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ContainerRemove kills and removes a container from the docker host.
func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
query := url.Values{}
if options.RemoveVolumes {
query.Set("v", "1")
}
if options.RemoveLinks {
query.Set("link", "1")
}
if options.Force {
query.Set("force", "1")
}
resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,16 +0,0 @@
package client
import (
"net/url"
"golang.org/x/net/context"
)
// ContainerRename changes the name of a given container.
func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
query := url.Values{}
query.Set("name", newContainerName)
resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,29 +0,0 @@
package client
import (
"net/url"
"strconv"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ContainerResize changes the size of the tty for a container.
func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
}
// ContainerExecResize changes the size of the tty for an exec process running inside a container.
func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
}
func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error {
query := url.Values{}
query.Set("h", strconv.Itoa(height))
query.Set("w", strconv.Itoa(width))
resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,19 +0,0 @@
package client
import (
"net/url"
"strconv"
"golang.org/x/net/context"
)
// ContainerRestart stops and starts a container again.
// It makes the daemon wait for the container to be up again for
// a specific amount of time, given the timeout.
func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout int) error {
query := url.Values{}
query.Set("t", strconv.Itoa(timeout))
resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,10 +0,0 @@
package client
import "golang.org/x/net/context"
// ContainerStart sends a request to the docker daemon to start a container.
func (cli *Client) ContainerStart(ctx context.Context, containerID string) error {
resp, err := cli.post(ctx, "/containers/"+containerID+"/start", nil, nil, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,24 +0,0 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
)
// ContainerStats returns near realtime stats for a given container.
// It's up to the caller to close the io.ReadCloser returned.
func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
query := url.Values{}
query.Set("stream", "0")
if stream {
query.Set("stream", "1")
}
resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
if err != nil {
return nil, err
}
return resp.body, err
}


@@ -1,18 +0,0 @@
package client
import (
"net/url"
"strconv"
"golang.org/x/net/context"
)
// ContainerStop stops a container. The call blocks until the container
// stops or the timeout expires.
func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout int) error {
query := url.Values{}
query.Set("t", strconv.Itoa(timeout))
resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,28 +0,0 @@
package client
import (
"encoding/json"
"net/url"
"strings"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ContainerTop shows process information from within a container.
func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) {
var response types.ContainerProcessList
query := url.Values{}
if len(arguments) > 0 {
query.Set("ps_args", strings.Join(arguments, " "))
}
resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}


@@ -1,10 +0,0 @@
package client
import "golang.org/x/net/context"
// ContainerUnpause resumes the process execution within a container
func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,13 +0,0 @@
package client
import (
"github.com/docker/engine-api/types/container"
"golang.org/x/net/context"
)
// ContainerUpdate updates resources of a container
func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error {
resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
ensureReaderClosed(resp)
return err
}


@@ -1,26 +0,0 @@
package client
import (
"encoding/json"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ContainerWait pauses execution until a container exits.
// It returns the container's exit status code.
func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) {
resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
if err != nil {
return -1, err
}
defer ensureReaderClosed(resp)
var res types.ContainerWaitResponse
if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
return -1, err
}
return res.StatusCode, nil
}
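ContainerStart plus ContainerWait gives the usual run-to-completion pattern; a sketch, helper name hypothetical.

```go
package waitsketch

import (
	"fmt"

	"github.com/docker/engine-api/client"
	"golang.org/x/net/context"
)

// runToCompletion starts a created container and blocks until it exits,
// returning an error for non-zero exit codes.
func runToCompletion(ctx context.Context, cli *client.Client, containerID string) error {
	if err := cli.ContainerStart(ctx, containerID); err != nil {
		return err
	}
	code, err := cli.ContainerWait(ctx, containerID)
	if err != nil {
		return err
	}
	if code != 0 {
		return fmt.Errorf("container %s exited with status %d", containerID, code)
	}
	return nil
}
```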


@@ -1,94 +0,0 @@
package client
import (
"errors"
"fmt"
)
// ErrConnectionFailed is an error raised when the connection between the client and the server failed.
var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
// imageNotFoundError implements an error returned when an image is not in the docker host.
type imageNotFoundError struct {
imageID string
}
// Error returns a string representation of an imageNotFoundError
func (i imageNotFoundError) Error() string {
return fmt.Sprintf("Error: No such image: %s", i.imageID)
}
// IsErrImageNotFound returns true if the error is caused
// when an image is not found in the docker host.
func IsErrImageNotFound(err error) bool {
_, ok := err.(imageNotFoundError)
return ok
}
// containerNotFoundError implements an error returned when a container is not in the docker host.
type containerNotFoundError struct {
containerID string
}
// Error returns a string representation of a containerNotFoundError
func (e containerNotFoundError) Error() string {
return fmt.Sprintf("Error: No such container: %s", e.containerID)
}
// IsErrContainerNotFound returns true if the error is caused
// when a container is not found in the docker host.
func IsErrContainerNotFound(err error) bool {
_, ok := err.(containerNotFoundError)
return ok
}
// networkNotFoundError implements an error returned when a network is not in the docker host.
type networkNotFoundError struct {
networkID string
}
// Error returns a string representation of a networkNotFoundError
func (e networkNotFoundError) Error() string {
return fmt.Sprintf("Error: No such network: %s", e.networkID)
}
// IsErrNetworkNotFound returns true if the error is caused
// when a network is not found in the docker host.
func IsErrNetworkNotFound(err error) bool {
_, ok := err.(networkNotFoundError)
return ok
}
// volumeNotFoundError implements an error returned when a volume is not in the docker host.
type volumeNotFoundError struct {
volumeID string
}
// Error returns a string representation of a networkNotFoundError
func (e volumeNotFoundError) Error() string {
return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
}
// IsErrVolumeNotFound returns true if the error is caused
// when a volume is not found in the docker host.
func IsErrVolumeNotFound(err error) bool {
_, ok := err.(volumeNotFoundError)
return ok
}
// unauthorizedError represents an authorization error in a remote registry.
type unauthorizedError struct {
cause error
}
// Error returns a string representation of an unauthorizedError
func (u unauthorizedError) Error() string {
return u.cause.Error()
}
// IsErrUnauthorized returns true if the error is caused
// when a remote registry authentication fails
func IsErrUnauthorized(err error) bool {
_, ok := err.(unauthorizedError)
return ok
}
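These helpers are normally used to special-case "not found" after an inspect call; a sketch, helper name hypothetical.

```go
package clientsketch

import (
	"github.com/docker/engine-api/client"
	"golang.org/x/net/context"
)

// containerExists reports whether the container is known to the daemon,
// treating the typed not-found error as a non-error.
func containerExists(ctx context.Context, cli *client.Client, containerID string) (bool, error) {
	_, err := cli.ContainerInspect(ctx, containerID)
	if client.IsErrContainerNotFound(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
```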


@@ -1,48 +0,0 @@
package client
import (
"io"
"net/url"
"time"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
timetypes "github.com/docker/engine-api/types/time"
)
// Events returns a stream of events in the daemon in a ReadCloser.
// It's up to the caller to close the stream.
func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) {
query := url.Values{}
ref := time.Now()
if options.Since != "" {
ts, err := timetypes.GetTimestamp(options.Since, ref)
if err != nil {
return nil, err
}
query.Set("since", ts)
}
if options.Until != "" {
ts, err := timetypes.GetTimestamp(options.Until, ref)
if err != nil {
return nil, err
}
query.Set("until", ts)
}
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToParam(options.Filters)
if err != nil {
return nil, err
}
query.Set("filters", filterJSON)
}
serverResponse, err := cli.get(ctx, "/events", query, nil)
if err != nil {
return nil, err
}
return serverResponse.body, nil
}


@@ -1,174 +0,0 @@
package client
import (
"crypto/tls"
"errors"
"fmt"
"net"
"net/http/httputil"
"net/url"
"strings"
"time"
"github.com/docker/engine-api/types"
"github.com/docker/go-connections/sockets"
"golang.org/x/net/context"
)
// tlsClientCon holds tls information and a dialed connection.
type tlsClientCon struct {
*tls.Conn
rawConn net.Conn
}
func (c *tlsClientCon) CloseWrite() error {
// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
// on its underlying connection.
if conn, ok := c.rawConn.(types.CloseWriter); ok {
return conn.CloseWrite()
}
return nil
}
// postHijacked sends a POST request and hijacks the connection.
func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
bodyEncoded, err := encodeData(body)
if err != nil {
return types.HijackedResponse{}, err
}
req, err := cli.newRequest("POST", path, query, bodyEncoded, headers)
if err != nil {
return types.HijackedResponse{}, err
}
req.Host = cli.addr
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", "tcp")
conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig())
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
}
return types.HijackedResponse{}, err
}
// When we set up a TCP connection for hijack, there could be long periods
// of inactivity (a long running command with no output) that in certain
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
// state. Setting TCP KeepAlive on the socket connection will prohibit
// ECONNTIMEOUT unless the socket connection truly is broken
if tcpConn, ok := conn.(*net.TCPConn); ok {
tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(30 * time.Second)
}
clientconn := httputil.NewClientConn(conn, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
_, err = clientconn.Do(req)
rwc, br := clientconn.Hijack()
return types.HijackedResponse{Conn: rwc, Reader: br}, err
}
func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
return tlsDialWithDialer(new(net.Dialer), network, addr, config)
}
// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
// order to return our custom tlsClientCon struct which holds both the tls.Conn
// object _and_ its underlying raw connection. The rationale for this is that
// we need to be able to close the write end of the connection when attaching,
// which tls.Conn does not provide.
func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
// We want the Timeout and Deadline values from dialer to cover the
// whole process: TCP connection and TLS handshake. This means that we
// also need to start our own timers now.
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
deadlineTimeout := dialer.Deadline.Sub(time.Now())
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
}
var errChannel chan error
if timeout != 0 {
errChannel = make(chan error, 2)
time.AfterFunc(timeout, func() {
errChannel <- errors.New("")
})
}
proxyDialer, err := sockets.DialerFromEnvironment(dialer)
if err != nil {
return nil, err
}
rawConn, err := proxyDialer.Dial(network, addr)
if err != nil {
return nil, err
}
// When we set up a TCP connection for hijack, there could be long periods
// of inactivity (a long running command with no output) that in certain
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
// state. Setting TCP KeepAlive on the socket connection will prohibit
// ECONNTIMEOUT unless the socket connection truly is broken
if tcpConn, ok := rawConn.(*net.TCPConn); ok {
tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(30 * time.Second)
}
colonPos := strings.LastIndex(addr, ":")
if colonPos == -1 {
colonPos = len(addr)
}
hostname := addr[:colonPos]
// If no ServerName is set, infer the ServerName
// from the hostname we're connecting to.
if config.ServerName == "" {
// Make a copy to avoid polluting argument or default.
c := *config
c.ServerName = hostname
config = &c
}
conn := tls.Client(rawConn, config)
if timeout == 0 {
err = conn.Handshake()
} else {
go func() {
errChannel <- conn.Handshake()
}()
err = <-errChannel
}
if err != nil {
rawConn.Close()
return nil, err
}
// This is where we differ from the standard crypto/tls package: we return a
// wrapper which holds both the TLS and raw connections.
return &tlsClientCon{conn, rawConn}, nil
}
func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
if tlsConfig != nil && proto != "unix" && proto != "npipe" {
// Notice this isn't Go standard's tls.Dial function
return tlsDial(proto, addr, tlsConfig)
}
if proto == "npipe" {
return sockets.DialPipe(addr, 32*time.Second)
}
return net.Dial(proto, addr)
}


@@ -1,135 +0,0 @@
package client
import (
"encoding/base64"
"encoding/json"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
)
var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
// ImageBuild sends a request to the daemon to build images.
// The Body in the response implements an io.ReadCloser and it's up to the caller to
// close it.
func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
query, err := imageBuildOptionsToQuery(options)
if err != nil {
return types.ImageBuildResponse{}, err
}
headers := http.Header(make(map[string][]string))
buf, err := json.Marshal(options.AuthConfigs)
if err != nil {
return types.ImageBuildResponse{}, err
}
headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
headers.Set("Content-Type", "application/tar")
serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
if err != nil {
return types.ImageBuildResponse{}, err
}
osType := getDockerOS(serverResp.header.Get("Server"))
return types.ImageBuildResponse{
Body: serverResp.body,
OSType: osType,
}, nil
}
func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
query := url.Values{
"t": options.Tags,
}
if options.SuppressOutput {
query.Set("q", "1")
}
if options.RemoteContext != "" {
query.Set("remote", options.RemoteContext)
}
if options.NoCache {
query.Set("nocache", "1")
}
if options.Remove {
query.Set("rm", "1")
} else {
query.Set("rm", "0")
}
if options.ForceRemove {
query.Set("forcerm", "1")
}
if options.PullParent {
query.Set("pull", "1")
}
if !container.Isolation.IsDefault(options.Isolation) {
query.Set("isolation", string(options.Isolation))
}
query.Set("cpusetcpus", options.CPUSetCPUs)
query.Set("cpusetmems", options.CPUSetMems)
query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
query.Set("memory", strconv.FormatInt(options.Memory, 10))
query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
query.Set("cgroupparent", options.CgroupParent)
query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
query.Set("dockerfile", options.Dockerfile)
ulimitsJSON, err := json.Marshal(options.Ulimits)
if err != nil {
return query, err
}
query.Set("ulimits", string(ulimitsJSON))
buildArgsJSON, err := json.Marshal(options.BuildArgs)
if err != nil {
return query, err
}
query.Set("buildargs", string(buildArgsJSON))
labelsJSON, err := json.Marshal(options.Labels)
if err != nil {
return query, err
}
query.Set("labels", string(labelsJSON))
return query, nil
}
func getDockerOS(serverHeader string) string {
var osType string
matches := headerRegexp.FindStringSubmatch(serverHeader)
if len(matches) > 0 {
osType = matches[1]
}
return osType
}
// convertKVStringsToMap converts ["key=value"] to {"key":"value"}
func convertKVStringsToMap(values []string) map[string]string {
result := make(map[string]string, len(values))
for _, value := range values {
kv := strings.SplitN(value, "=", 2)
if len(kv) == 1 {
result[kv[0]] = ""
} else {
result[kv[0]] = kv[1]
}
}
return result
}
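
As a usage illustration of ImageBuild above: the caller passes a tar stream as the build context and must close the response body. A hedged sketch, assuming a pre-built context archive `context.tar`, an illustrative tag, and a client created with NewEnvClient:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// The build context must be a tar stream containing the Dockerfile;
	// here it is assumed to come from a pre-built archive on disk.
	buildContext, err := os.Open("context.tar")
	if err != nil {
		panic(err)
	}
	defer buildContext.Close()

	resp, err := cli.ImageBuild(context.Background(), buildContext, types.ImageBuildOptions{
		Tags:       []string{"example/hello:latest"},
		Dockerfile: "Dockerfile",
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close() // the caller must close the response body

	// Echo the daemon's JSON progress stream.
	io.Copy(os.Stdout, resp.Body)
	fmt.Println()
}
```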

View File

@@ -1,34 +0,0 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/reference"
)
// ImageCreate creates a new image based on the parent options.
// It returns the JSON content in the response body.
func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
repository, tag, err := reference.Parse(parentReference)
if err != nil {
return nil, err
}
query := url.Values{}
query.Set("fromImage", repository)
query.Set("tag", tag)
resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
if err != nil {
return nil, err
}
return resp.body, nil
}
func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
return cli.post(ctx, "/images/create", query, nil, headers)
}

View File

@@ -1,22 +0,0 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ImageHistory returns the changes in an image in history format.
func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) {
var history []types.ImageHistory
serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
if err != nil {
return history, err
}
err = json.NewDecoder(serverResp.body).Decode(&history)
ensureReaderClosed(serverResp)
return history, err
}

View File

@@ -1,37 +0,0 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
"github.com/docker/distribution/reference"
"github.com/docker/engine-api/types"
)
// ImageImport creates a new image based on the source options.
// It returns the JSON content in the response body.
func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
if ref != "" {
// Check whether the given image name can be resolved
if _, err := reference.ParseNamed(ref); err != nil {
return nil, err
}
}
query := url.Values{}
query.Set("fromSrc", source.SourceName)
query.Set("repo", ref)
query.Set("tag", options.Tag)
query.Set("message", options.Message)
for _, change := range options.Changes {
query.Add("changes", change)
}
resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
if err != nil {
return nil, err
}
return resp.body, nil
}

View File

@@ -1,38 +0,0 @@
package client
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ImageInspectWithRaw returns the image information and its raw representation.
func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) {
query := url.Values{}
if getSize {
query.Set("size", "1")
}
serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
if err != nil {
if serverResp.statusCode == http.StatusNotFound {
return types.ImageInspect{}, nil, imageNotFoundError{imageID}
}
return types.ImageInspect{}, nil, err
}
defer ensureReaderClosed(serverResp)
body, err := ioutil.ReadAll(serverResp.body)
if err != nil {
return types.ImageInspect{}, nil, err
}
var response types.ImageInspect
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&response)
return response, body, err
}

View File

@@ -1,40 +0,0 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"golang.org/x/net/context"
)
// ImageList returns a list of images in the docker host.
func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) {
var images []types.Image
query := url.Values{}
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToParam(options.Filters)
if err != nil {
return images, err
}
query.Set("filters", filterJSON)
}
if options.MatchName != "" {
// FIXME rename this parameter, to not be confused with the filters flag
query.Set("filter", options.MatchName)
}
if options.All {
query.Set("all", "1")
}
serverResp, err := cli.get(ctx, "/images/json", query, nil)
if err != nil {
return images, err
}
err = json.NewDecoder(serverResp.body).Decode(&images)
ensureReaderClosed(serverResp)
return images, err
}

View File

@@ -1,30 +0,0 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ImageLoad loads an image in the docker host from the client host.
// It's up to the caller to close the io.ReadCloser in the
// ImageLoadResponse returned by this function.
func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
v := url.Values{}
v.Set("quiet", "0")
if quiet {
v.Set("quiet", "1")
}
headers := map[string][]string{"Content-Type": {"application/x-tar"}}
resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
if err != nil {
return types.ImageLoadResponse{}, err
}
return types.ImageLoadResponse{
Body: resp.body,
JSON: resp.header.Get("Content-Type") == "application/json",
}, nil
}

View File

@@ -1,46 +0,0 @@
package client
import (
"io"
"net/http"
"net/url"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/reference"
)
// ImagePull requests the docker host to pull an image from a remote registry.
// It executes the privileged function if the operation is unauthorized
// and it tries one more time.
// It's up to the caller to handle the io.ReadCloser and close it properly.
//
// FIXME(vdemeester): this is currently used in a few ways in docker/docker:
// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
// - if in trusted content, ref is used to pass the reference name, and tag for the digest
func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) {
repository, tag, err := reference.Parse(ref)
if err != nil {
return nil, err
}
query := url.Values{}
query.Set("fromImage", repository)
if tag != "" && !options.All {
query.Set("tag", tag)
}
resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
newAuthHeader, privilegeErr := options.PrivilegeFunc()
if privilegeErr != nil {
return nil, privilegeErr
}
resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
}
if err != nil {
return nil, err
}
return resp.body, nil
}
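
ImagePull above returns a JSON progress stream that the caller must drain and close; RegistryAuth and PrivilegeFunc only matter for registries that require authentication. A minimal sketch (the image name is illustrative):

```go
package main

import (
	"io"
	"os"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Pull a public image; RegistryAuth and PrivilegeFunc stay unset here.
	rc, err := cli.ImagePull(context.Background(), "alpine:latest", types.ImagePullOptions{})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// The stream carries JSON progress messages; just echo them.
	io.Copy(os.Stdout, rc)
}
```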

View File

@@ -1,54 +0,0 @@
package client
import (
"errors"
"io"
"net/http"
"net/url"
"golang.org/x/net/context"
distreference "github.com/docker/distribution/reference"
"github.com/docker/engine-api/types"
)
// ImagePush requests the docker host to push an image to a remote registry.
// It executes the privileged function if the operation is unauthorized
// and it tries one more time.
// It's up to the caller to handle the io.ReadCloser and close it properly.
func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) {
distributionRef, err := distreference.ParseNamed(ref)
if err != nil {
return nil, err
}
if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
return nil, errors.New("cannot push a digest reference")
}
var tag = ""
if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged {
tag = nameTaggedRef.Tag()
}
query := url.Values{}
query.Set("tag", tag)
resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth)
if resp.statusCode == http.StatusUnauthorized {
newAuthHeader, privilegeErr := options.PrivilegeFunc()
if privilegeErr != nil {
return nil, privilegeErr
}
resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader)
}
if err != nil {
return nil, err
}
return resp.body, nil
}
func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) {
headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
}

View File

@@ -1,31 +0,0 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ImageRemove removes an image from the docker host.
func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) {
query := url.Values{}
if options.Force {
query.Set("force", "1")
}
if !options.PruneChildren {
query.Set("noprune", "1")
}
resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
if err != nil {
return nil, err
}
var dels []types.ImageDelete
err = json.NewDecoder(resp.body).Decode(&dels)
ensureReaderClosed(resp)
return dels, err
}

View File

@@ -1,22 +0,0 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
)
// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
// It's up to the caller to store the images and close the stream.
func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
query := url.Values{
"names": imageIDs,
}
resp, err := cli.get(ctx, "/images/get", query, nil)
if err != nil {
return nil, err
}
return resp.body, nil
}

View File

@@ -1,49 +0,0 @@
package client
import (
"encoding/json"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/registry"
"golang.org/x/net/context"
)
// ImageSearch makes the docker host search for a term in a remote registry.
// The list of results is not sorted in any fashion.
func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) {
var results []registry.SearchResult
query := url.Values{}
query.Set("term", term)
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToParam(options.Filters)
if err != nil {
return results, err
}
query.Set("filters", filterJSON)
}
resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
if resp.statusCode == http.StatusUnauthorized {
newAuthHeader, privilegeErr := options.PrivilegeFunc()
if privilegeErr != nil {
return results, privilegeErr
}
resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
}
if err != nil {
return results, err
}
err = json.NewDecoder(resp.body).Decode(&results)
ensureReaderClosed(resp)
return results, err
}
func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
return cli.get(ctx, "/images/search", query, headers)
}

View File

@@ -1,38 +0,0 @@
package client
import (
"errors"
"fmt"
"net/url"
"golang.org/x/net/context"
distreference "github.com/docker/distribution/reference"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/reference"
)
// ImageTag tags an image in the docker host
func (cli *Client) ImageTag(ctx context.Context, imageID, ref string, options types.ImageTagOptions) error {
distributionRef, err := distreference.ParseNamed(ref)
if err != nil {
return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref)
}
if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
return errors.New("refusing to create a tag with a digest reference")
}
tag := reference.GetTagFromNamedRef(distributionRef)
query := url.Values{}
query.Set("repo", distributionRef.Name())
query.Set("tag", tag)
if options.Force {
query.Set("force", "1")
}
resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@@ -1,26 +0,0 @@
package client
import (
"encoding/json"
"fmt"
"net/url"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// Info returns information about the docker server.
func (cli *Client) Info(ctx context.Context) (types.Info, error) {
var info types.Info
serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
if err != nil {
return info, err
}
defer ensureReaderClosed(serverResp)
if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
return info, fmt.Errorf("Error reading remote info: %v", err)
}
return info, nil
}

View File

@@ -1,79 +0,0 @@
package client
import (
"io"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/network"
"github.com/docker/engine-api/types/registry"
)
// APIClient is an interface that clients that talk with a docker server must implement.
type APIClient interface {
ClientVersion() string
ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error)
ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error)
ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error)
ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error)
ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error)
ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
ContainerKill(ctx context.Context, container, signal string) error
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
ContainerPause(ctx context.Context, container string) error
ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
ContainerRename(ctx context.Context, container, newContainerName string) error
ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
ContainerRestart(ctx context.Context, container string, timeout int) error
ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error)
ContainerStart(ctx context.Context, container string) error
ContainerStop(ctx context.Context, container string, timeout int) error
ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error)
ContainerUnpause(ctx context.Context, container string) error
ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) error
ContainerWait(ctx context.Context, container string) (int, error)
CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error)
ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error)
ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
ImageInspectWithRaw(ctx context.Context, image string, getSize bool) (types.ImageInspect, []byte, error)
ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error)
ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error)
ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
ImageTag(ctx context.Context, image, ref string, options types.ImageTagOptions) error
Info(ctx context.Context) (types.Info, error)
NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error
NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error
NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error)
NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
NetworkRemove(ctx context.Context, networkID string) error
RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error)
ServerVersion(ctx context.Context) (types.Version, error)
UpdateClientVersion(v string)
VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error)
VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error)
VolumeRemove(ctx context.Context, volumeID string) error
}
// Ensure that Client always implements APIClient.
var _ APIClient = &Client{}
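
Because Client is asserted to satisfy APIClient, calling code can depend on the interface and swap in a fake for tests. A small sketch of that pattern (the helper name is illustrative):

```go
package example

import (
	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

// listAll depends only on the APIClient interface, so tests can pass a fake
// implementation instead of a real *client.Client.
func listAll(ctx context.Context, c client.APIClient) ([]types.Container, error) {
	return c.ContainerList(ctx, types.ContainerListOptions{All: true})
}
```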

View File

@@ -1,28 +0,0 @@
package client
import (
"encoding/json"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// RegistryLogin authenticates the docker server with a given docker registry.
// It returns an unauthorizedError when the authentication fails.
func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) {
resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
if resp != nil && resp.statusCode == http.StatusUnauthorized {
return types.AuthResponse{}, unauthorizedError{err}
}
if err != nil {
return types.AuthResponse{}, err
}
var response types.AuthResponse
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}

View File

@@ -1,18 +0,0 @@
package client
import (
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/network"
"golang.org/x/net/context"
)
// NetworkConnect connects a container to an existing network in the docker host.
func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
nc := types.NetworkConnect{
Container: containerID,
EndpointConfig: config,
}
resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
ensureReaderClosed(resp)
return err
}

View File

@@ -1,25 +0,0 @@
package client
import (
"encoding/json"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// NetworkCreate creates a new network in the docker host.
func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
networkCreateRequest := types.NetworkCreateRequest{
NetworkCreate: options,
Name: name,
}
var response types.NetworkCreateResponse
serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(serverResp.body).Decode(&response)
ensureReaderClosed(serverResp)
return response, err
}

View File

@@ -1,14 +0,0 @@
package client
import (
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// NetworkDisconnect disconnects a container from an existing network in the docker host.
func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
nd := types.NetworkDisconnect{Container: containerID, Force: force}
resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
ensureReaderClosed(resp)
return err
}

View File

@@ -1,24 +0,0 @@
package client
import (
"encoding/json"
"net/http"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// NetworkInspect returns the information for a specific network configured in the docker host.
func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) {
var networkResource types.NetworkResource
resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil)
if err != nil {
if resp.statusCode == http.StatusNotFound {
return networkResource, networkNotFoundError{networkID}
}
return networkResource, err
}
err = json.NewDecoder(resp.body).Decode(&networkResource)
ensureReaderClosed(resp)
return networkResource, err
}

View File

@@ -1,31 +0,0 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"golang.org/x/net/context"
)
// NetworkList returns the list of networks configured in the docker host.
func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
query := url.Values{}
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToParam(options.Filters)
if err != nil {
return nil, err
}
query.Set("filters", filterJSON)
}
var networkResources []types.NetworkResource
resp, err := cli.get(ctx, "/networks", query, nil)
if err != nil {
return networkResources, err
}
err = json.NewDecoder(resp.body).Decode(&networkResources)
ensureReaderClosed(resp)
return networkResources, err
}

View File

@@ -1,10 +0,0 @@
package client
import "golang.org/x/net/context"
// NetworkRemove removes an existing network from the docker host.
func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@@ -1,185 +0,0 @@
package client
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/docker/engine-api/client/transport/cancellable"
"golang.org/x/net/context"
)
// serverResponse is a wrapper for http API responses.
type serverResponse struct {
body io.ReadCloser
header http.Header
statusCode int
}
// head sends an http request to the docker API using the method HEAD.
func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
}
// get sends an http request to the docker API using the method GET with a specific go context.
func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(ctx, "GET", path, query, nil, headers)
}
// post sends an http request to the docker API using the method POST with a specific go context.
func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(ctx, "POST", path, query, obj, headers)
}
func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
return cli.sendClientRequest(ctx, "POST", path, query, body, headers)
}
// put sends an http request to the docker API using the method PUT.
func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(ctx, "PUT", path, query, obj, headers)
}
// putRaw sends an http request to the docker API using the method PUT with a raw body.
func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
return cli.sendClientRequest(ctx, "PUT", path, query, body, headers)
}
// delete sends an http request to the docker API using the method DELETE.
func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(ctx, "DELETE", path, query, nil, headers)
}
func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
var body io.Reader
if obj != nil {
var err error
body, err = encodeData(obj)
if err != nil {
return nil, err
}
if headers == nil {
headers = make(map[string][]string)
}
headers["Content-Type"] = []string{"application/json"}
}
return cli.sendClientRequest(ctx, method, path, query, body, headers)
}
func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
serverResp := &serverResponse{
body: nil,
statusCode: -1,
}
expectedPayload := (method == "POST" || method == "PUT")
if expectedPayload && body == nil {
body = bytes.NewReader([]byte{})
}
req, err := cli.newRequest(method, path, query, body, headers)
if err != nil {
return serverResp, err
}
if cli.proto == "unix" || cli.proto == "npipe" {
// For local communications, it doesn't matter what the host is. We just
// need a valid and meaningful host name. (See #189)
req.Host = "docker"
}
req.URL.Host = cli.addr
req.URL.Scheme = cli.transport.Scheme()
if expectedPayload && req.Header.Get("Content-Type") == "" {
req.Header.Set("Content-Type", "text/plain")
}
resp, err := cancellable.Do(ctx, cli.transport, req)
if resp != nil {
serverResp.statusCode = resp.StatusCode
}
if err != nil {
if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
return serverResp, ErrConnectionFailed
}
if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") {
return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
}
if cli.transport.Secure() && strings.Contains(err.Error(), "remote error: bad certificate") {
return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err)
}
return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err)
}
if serverResp.statusCode < 200 || serverResp.statusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return serverResp, err
}
if len(body) == 0 {
return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL)
}
return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
}
serverResp.body = resp.Body
serverResp.header = resp.Header
return serverResp, nil
}
func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) {
apiPath := cli.getAPIPath(path, query)
req, err := http.NewRequest(method, apiPath, body)
if err != nil {
return nil, err
}
// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
// so that the user can't override our headers
for k, v := range cli.customHTTPHeaders {
req.Header.Set(k, v)
}
if headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
return req, nil
}
func encodeData(data interface{}) (*bytes.Buffer, error) {
params := bytes.NewBuffer(nil)
if data != nil {
if err := json.NewEncoder(params).Encode(data); err != nil {
return nil, err
}
}
return params, nil
}
func ensureReaderClosed(response *serverResponse) {
if response != nil && response.body != nil {
response.body.Close()
}
}
func isTimeout(err error) bool {
type timeout interface {
Timeout() bool
}
e := err
switch urlErr := err.(type) {
case *url.Error:
e = urlErr.Err
}
t, ok := e.(timeout)
return ok && t.Timeout()
}

View File

@@ -1,23 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.5
package cancellable
import (
"net/http"
"github.com/docker/engine-api/client/transport"
)
func canceler(client transport.Sender, req *http.Request) func() {
// TODO(djd): Respect any existing value of req.Cancel.
ch := make(chan struct{})
req.Cancel = ch
return func() {
close(ch)
}
}

View File

@@ -1,27 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.5
package cancellable
import (
"net/http"
"github.com/docker/engine-api/client/transport"
)
type requestCanceler interface {
CancelRequest(*http.Request)
}
func canceler(client transport.Sender, req *http.Request) func() {
rc, ok := client.(requestCanceler)
if !ok {
return func() {}
}
return func() {
rc.CancelRequest(req)
}
}

View File

@@ -1,113 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cancellable provides helper function to cancel http requests.
package cancellable
import (
"io"
"net/http"
"github.com/docker/engine-api/client/transport"
"golang.org/x/net/context"
)
func nop() {}
var (
testHookContextDoneBeforeHeaders = nop
testHookDoReturned = nop
testHookDidBodyClose = nop
)
// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
//
// FORK INFORMATION:
//
// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by
// taking a Sender interface rather than an *http.Client directly. That allows us to use
// this function with mocked clients and hijacked connections.
func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) {
if client == nil {
client = http.DefaultClient
}
// Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go.
cancel := canceler(client, req)
type responseAndError struct {
resp *http.Response
err error
}
result := make(chan responseAndError, 1)
go func() {
resp, err := client.Do(req)
testHookDoReturned()
result <- responseAndError{resp, err}
}()
var resp *http.Response
select {
case <-ctx.Done():
testHookContextDoneBeforeHeaders()
cancel()
// Clean up after the goroutine calling client.Do:
go func() {
if r := <-result; r.resp != nil && r.resp.Body != nil {
testHookDidBodyClose()
r.resp.Body.Close()
}
}()
return nil, ctx.Err()
case r := <-result:
var err error
resp, err = r.resp, r.err
if err != nil {
return resp, err
}
}
c := make(chan struct{})
go func() {
select {
case <-ctx.Done():
cancel()
case <-c:
// The response's Body is closed.
}
}()
resp.Body = &notifyingReader{resp.Body, c}
return resp, nil
}
// notifyingReader is an io.ReadCloser that closes the notify channel after
// Close is called or a Read fails on the underlying ReadCloser.
type notifyingReader struct {
io.ReadCloser
notify chan<- struct{}
}
func (r *notifyingReader) Read(p []byte) (int, error) {
n, err := r.ReadCloser.Read(p)
if err != nil && r.notify != nil {
close(r.notify)
r.notify = nil
}
return n, err
}
func (r *notifyingReader) Close() error {
err := r.ReadCloser.Close()
if r.notify != nil {
close(r.notify)
r.notify = nil
}
return err
}
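
Do above accepts any transport.Sender, and a plain *http.Client already satisfies that interface, so the helper can be exercised on its own. A sketch of context-based cancellation (the URL and timeout are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/engine-api/client/transport/cancellable"
	"golang.org/x/net/context"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	// Cancel the request if it takes longer than two seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resp, err := cancellable.Do(ctx, http.DefaultClient, req)
	if err != nil {
		fmt.Println("request failed or was cancelled:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```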

View File

@@ -1,47 +0,0 @@
package transport
import (
"crypto/tls"
"net/http"
)
// Sender is an interface that clients must implement
// to be able to send requests to a remote connection.
type Sender interface {
// Do sends request to a remote endpoint.
Do(*http.Request) (*http.Response, error)
}
// Client is an interface that abstracts all remote connections.
type Client interface {
Sender
// Secure tells whether the connection is secure or not.
Secure() bool
// Scheme returns the connection protocol the client uses.
Scheme() string
// TLSConfig returns any TLS configuration the client uses.
TLSConfig() *tls.Config
}
// tlsInfo returns information about the TLS configuration.
type tlsInfo struct {
tlsConfig *tls.Config
}
// TLSConfig returns the TLS configuration.
func (t *tlsInfo) TLSConfig() *tls.Config {
return t.tlsConfig
}
// Scheme returns protocol scheme to use.
func (t *tlsInfo) Scheme() string {
if t.tlsConfig != nil {
return "https"
}
return "http"
}
// Secure returns true if there is a TLS configuration.
func (t *tlsInfo) Secure() bool {
return t.tlsConfig != nil
}

View File

@@ -1,57 +0,0 @@
// Package transport provides function to send request to remote endpoints.
package transport
import (
"fmt"
"net/http"
"github.com/docker/go-connections/sockets"
)
// apiTransport holds information about the http transport to connect with the API.
type apiTransport struct {
*http.Client
*tlsInfo
transport *http.Transport
}
// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client.
// It uses Docker's default http transport configuration if the client is nil.
// It does not modify the client's transport if it's not nil.
func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) {
var transport *http.Transport
if client != nil {
tr, ok := client.Transport.(*http.Transport)
if !ok {
return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
}
transport = tr
} else {
transport = defaultTransport(proto, addr)
client = &http.Client{
Transport: transport,
}
}
return &apiTransport{
Client: client,
tlsInfo: &tlsInfo{transport.TLSClientConfig},
transport: transport,
}, nil
}
// CancelRequest stops a request execution.
func (a *apiTransport) CancelRequest(req *http.Request) {
a.transport.CancelRequest(req)
}
// defaultTransport creates a new http.Transport with Docker's
// default transport configuration.
func defaultTransport(proto, addr string) *http.Transport {
tr := new(http.Transport)
sockets.ConfigureTransport(tr, proto, addr)
return tr
}
var _ Client = &apiTransport{}
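
A short sketch of how NewTransportWithHTTP might be used on its own; the tcp address is illustrative, and passing a nil http.Client asks for Docker's default transport:

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/client/transport"
)

func main() {
	// nil client: the package builds its default transport for the given proto/addr.
	t, err := transport.NewTransportWithHTTP("tcp", "localhost:2375", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("scheme:", t.Scheme(), "secure:", t.Secure())
}
```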

View File

@@ -1,21 +0,0 @@
package client
import (
"encoding/json"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// ServerVersion returns information of the docker client and server host.
func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
resp, err := cli.get(ctx, "/version", nil, nil)
if err != nil {
return types.Version{}, err
}
var server types.Version
err = json.NewDecoder(resp.body).Decode(&server)
ensureReaderClosed(resp)
return server, err
}

View File

@@ -1,20 +0,0 @@
package client
import (
"encoding/json"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// VolumeCreate creates a volume in the docker host.
func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) {
var volume types.Volume
resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
if err != nil {
return volume, err
}
err = json.NewDecoder(resp.body).Decode(&volume)
ensureReaderClosed(resp)
return volume, err
}

View File

@@ -1,24 +0,0 @@
package client
import (
"encoding/json"
"net/http"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
// VolumeInspect returns the information about a specific volume in the docker host.
func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
var volume types.Volume
resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
if err != nil {
if resp.statusCode == http.StatusNotFound {
return volume, volumeNotFoundError{volumeID}
}
return volume, err
}
err = json.NewDecoder(resp.body).Decode(&volume)
ensureReaderClosed(resp)
return volume, err
}

View File

@@ -1,32 +0,0 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"golang.org/x/net/context"
)
// VolumeList returns the volumes configured in the docker host.
func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) {
var volumes types.VolumesListResponse
query := url.Values{}
if filter.Len() > 0 {
filterJSON, err := filters.ToParam(filter)
if err != nil {
return volumes, err
}
query.Set("filters", filterJSON)
}
resp, err := cli.get(ctx, "/volumes", query, nil)
if err != nil {
return volumes, err
}
err = json.NewDecoder(resp.body).Decode(&volumes)
ensureReaderClosed(resp)
return volumes, err
}

View File

@@ -1,10 +0,0 @@
package client
import "golang.org/x/net/context"
// VolumeRemove removes a volume from the docker host.
func (cli *Client) VolumeRemove(ctx context.Context, volumeID string) error {
resp, err := cli.delete(ctx, "/volumes/"+volumeID, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@@ -1,21 +0,0 @@
/*
Package engineapi provides libraries to implement client and server components compatible with the Docker engine.
The client package in github.com/docker/engine-api/client implements all necessary requests to implement the official Docker engine cli.
Create a new client, then use it to send and receive messages to the Docker engine API:
defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
cli, err := client.NewClient("unix:///var/run/docker.sock", "v1.22", nil, defaultHeaders)
Other programs, like Docker Machine, can set the default Docker engine environment for you. There is a shortcut to use its variables to configure the client:
cli, err := client.NewEnvClient()
All request arguments are defined as typed structures in the types package. For instance, this is how to get all containers running in the host:
options := types.ContainerListOptions{All: true}
containers, err := cli.ContainerList(context.Background(), options)
*/
package engineapi

View File

@@ -1,34 +0,0 @@
package reference
import (
distreference "github.com/docker/distribution/reference"
)
// Parse parses the given reference and returns the repository and
// tag (if present). It returns an error if parsing fails.
func Parse(ref string) (string, string, error) {
distributionRef, err := distreference.ParseNamed(ref)
if err != nil {
return "", "", err
}
tag := GetTagFromNamedRef(distributionRef)
return distributionRef.Name(), tag, nil
}
// GetTagFromNamedRef returns a tag from the specified reference.
// This function is necessary as long as the docker "server" api makes the distinction between repository
// and tags.
func GetTagFromNamedRef(ref distreference.Named) string {
var tag string
switch x := ref.(type) {
case distreference.Digested:
tag = x.Digest().String()
case distreference.NamedTagged:
tag = x.Tag()
default:
tag = "latest"
}
return tag
}
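
A small sketch of Parse splitting a reference; the reference string is illustrative and the exact repository form follows the vendored distribution/reference behaviour:

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/reference"
)

func main() {
	repo, tag, err := reference.Parse("redis:3.2")
	if err != nil {
		panic(err)
	}
	// A tagged reference yields the repository name and "3.2"; an untagged
	// reference falls back to "latest" via GetTagFromNamedRef.
	fmt.Println(repo, tag)
}
```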

View File

@@ -1,124 +0,0 @@
package time
import (
"fmt"
"math"
"strconv"
"strings"
"time"
)
// These are additional predefined layouts for use in Time.Format and Time.Parse
// with --since and --until parameters for `docker logs` and `docker events`
const (
rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
)
// GetTimestamp tries to parse given string as golang duration,
// then RFC3339 time and finally as a Unix timestamp. If
// any of these were successful, it returns a Unix timestamp
// as string otherwise returns the given value back.
// In case of duration input, the returned timestamp is computed
// as the given reference time minus the amount of the duration.
func GetTimestamp(value string, reference time.Time) (string, error) {
if d, err := time.ParseDuration(value); value != "0" && err == nil {
return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
}
var format string
var parseInLocation bool
// if the string has a Z, a +, or three dashes, use time.Parse; otherwise use time.ParseInLocation
parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
if strings.Contains(value, ".") {
if parseInLocation {
format = rFC3339NanoLocal
} else {
format = time.RFC3339Nano
}
} else if strings.Contains(value, "T") {
// we want the number of colons in the T portion of the timestamp
tcolons := strings.Count(value, ":")
// if parseInLocation is off and we have a +/- zone offset (not Z), then
// there will be an extra colon in the input for the tz offset; subtract that
// colon from the tcolons count
if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
tcolons--
}
if parseInLocation {
switch tcolons {
case 0:
format = "2006-01-02T15"
case 1:
format = "2006-01-02T15:04"
default:
format = rFC3339Local
}
} else {
switch tcolons {
case 0:
format = "2006-01-02T15Z07:00"
case 1:
format = "2006-01-02T15:04Z07:00"
default:
format = time.RFC3339
}
}
} else if parseInLocation {
format = dateLocal
} else {
format = dateWithZone
}
var t time.Time
var err error
if parseInLocation {
t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
} else {
t, err = time.Parse(format, value)
}
if err != nil {
// if there is a `-` then it's an RFC3339-like timestamp; otherwise assume a Unix timestamp
if strings.Contains(value, "-") {
return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
}
return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
}
return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
}
// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
// format "%d.%09d" (i.e. time.Unix() and int64(time.Nanosecond())).
// If the incoming nanosecond portion is longer or shorter than 9 digits it is
// converted to nanoseconds. The expectation is that the seconds and
// nanoseconds will be used to create a time variable. For example:
//     seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
//     if err == nil { since := time.Unix(seconds, nanoseconds) }
// It returns def (the default seconds) and zero nanoseconds if value == "".
func ParseTimestamps(value string, def int64) (int64, int64, error) {
if value == "" {
return def, 0, nil
}
sa := strings.SplitN(value, ".", 2)
s, err := strconv.ParseInt(sa[0], 10, 64)
if err != nil {
return s, 0, err
}
if len(sa) != 2 {
return s, 0, nil
}
n, err := strconv.ParseInt(sa[1], 10, 64)
if err != nil {
return s, n, err
}
// should already be in nanoseconds but just in case convert n to nanoseconds
n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
return s, n, nil
}
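
A brief sketch of the two helpers above; the reference time and input strings are illustrative, and the import path `types/time` is assumed from the vendored layout:

```go
package main

import (
	"fmt"
	"time"

	dockertime "github.com/docker/engine-api/types/time"
)

func main() {
	now := time.Now()

	// A duration is interpreted relative to the reference time, so "10m"
	// becomes the Unix timestamp for ten minutes before now.
	ts, err := dockertime.GetTimestamp("10m", now)
	fmt.Println(ts, err)

	// ParseTimestamps splits a "seconds.nanoseconds" string back apart.
	secs, nanos, err := dockertime.ParseTimestamps("1136073600.000000001", 0)
	fmt.Println(secs, nanos, err) // 1136073600 1 <nil>
}
```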

View File

@@ -1,3 +0,0 @@
// Package connections provides libraries to work with network connections.
// This library is divided in several components for specific usage.
package connections

View File

@@ -1,89 +0,0 @@
package sockets
import (
"errors"
"net"
"sync"
)
var errClosed = errors.New("use of closed network connection")
// InmemSocket implements net.Listener using in-memory only connections.
type InmemSocket struct {
chConn chan net.Conn
chClose chan struct{}
addr string
mu sync.Mutex
}
// dummyAddr is used to satisfy net.Addr for the in-mem socket.
// It is just stored as a string and returns the string for all calls.
type dummyAddr string
// NewInmemSocket creates an in-memory only net.Listener
// The addr argument can be any string, but is used to satisfy the `Addr()` part
// of the net.Listener interface
func NewInmemSocket(addr string, bufSize int) *InmemSocket {
return &InmemSocket{
chConn: make(chan net.Conn, bufSize),
chClose: make(chan struct{}),
addr: addr,
}
}
// Addr returns the socket's addr string to satisfy net.Listener
func (s *InmemSocket) Addr() net.Addr {
return dummyAddr(s.addr)
}
// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
func (s *InmemSocket) Accept() (net.Conn, error) {
select {
case conn := <-s.chConn:
return conn, nil
case <-s.chClose:
return nil, errClosed
}
}
// Close closes the listener. It will be unavailable for use once closed.
func (s *InmemSocket) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-s.chClose:
default:
close(s.chClose)
}
return nil
}
// Dial is used to establish a connection with the in-mem server
func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
srvConn, clientConn := net.Pipe()
select {
case s.chConn <- srvConn:
case <-s.chClose:
return nil, errClosed
}
return clientConn, nil
}
// Network returns the addr string, satisfies net.Addr
func (a dummyAddr) Network() string {
return string(a)
}
// String returns the string form
func (a dummyAddr) String() string {
return string(a)
}
// timeoutError is used when there is a timeout with a connection
// this implements the net.Error interface
type timeoutError struct{}
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
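
A sketch of wiring InmemSocket into an http.Client: the listener serves requests in memory and its Dial method doubles as the transport's dialer. The address "inmem" and the handler are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	l := sockets.NewInmemSocket("inmem", 1)
	defer l.Close()

	// Serve a trivial handler on the in-memory listener.
	go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	}))

	// Use the socket's own Dial method as the HTTP transport's dialer.
	client := &http.Client{Transport: &http.Transport{Dial: l.Dial}}
	resp, err := client.Get("http://inmem/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```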

View File

@@ -1,51 +0,0 @@
package sockets
import (
"net"
"net/url"
"os"
"strings"
"golang.org/x/net/proxy"
)
// GetProxyEnv allows access to the uppercase and the lowercase forms of
// proxy-related variables. See the Go net/http documentation for details on
// these variables: https://golang.org/pkg/net/http/
func GetProxyEnv(key string) string {
proxyValue := os.Getenv(strings.ToUpper(key))
if proxyValue == "" {
return os.Getenv(strings.ToLower(key))
}
return proxyValue
}
// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
// proxy.Dialer which will route the connections through the proxy using the
// given dialer.
func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
allProxy := GetProxyEnv("all_proxy")
if len(allProxy) == 0 {
return direct, nil
}
proxyURL, err := url.Parse(allProxy)
if err != nil {
return direct, err
}
proxyFromURL, err := proxy.FromURL(proxyURL, direct)
if err != nil {
return direct, err
}
noProxy := GetProxyEnv("no_proxy")
if len(noProxy) == 0 {
return proxyFromURL, nil
}
perHost := proxy.NewPerHost(proxyFromURL, direct)
perHost.AddFromString(noProxy)
return perHost, nil
}
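
A sketch of DialerFromEnvironment honouring all_proxy/no_proxy when dialing; the target address is illustrative:

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/docker/go-connections/sockets"
)

func main() {
	direct := &net.Dialer{Timeout: 10 * time.Second}

	// Routes through the proxy named in all_proxy (if any), bypassing hosts
	// listed in no_proxy; otherwise it falls back to the direct dialer.
	dialer, err := sockets.DialerFromEnvironment(direct)
	if err != nil {
		panic(err)
	}
	conn, err := dialer.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}
```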

View File

@@ -1,42 +0,0 @@
// Package sockets provides helper functions to create and configure Unix or TCP sockets.
package sockets
import (
"net"
"net/http"
"time"
)
// Why 32? See https://github.com/docker/docker/pull/8035.
const defaultTimeout = 32 * time.Second
// ConfigureTransport configures the specified Transport according to the
// specified proto and addr.
// If the proto is unix (using a unix socket to communicate) or npipe the
// compression is disabled.
func ConfigureTransport(tr *http.Transport, proto, addr string) error {
switch proto {
case "unix":
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return net.DialTimeout(proto, addr, defaultTimeout)
}
case "npipe":
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return DialPipe(addr, defaultTimeout)
}
default:
tr.Proxy = http.ProxyFromEnvironment
dialer, err := DialerFromEnvironment(&net.Dialer{
Timeout: defaultTimeout,
})
if err != nil {
return err
}
tr.Dial = dialer.Dial
}
return nil
}
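
A sketch of ConfigureTransport preparing an http.Transport for a given proto/addr pair; the tcp address is illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// For "unix" or "npipe" this wires up a local dialer and disables
	// compression; for anything else it installs a proxy-aware dialer.
	if err := sockets.ConfigureTransport(tr, "tcp", "localhost:2375"); err != nil {
		panic(err)
	}
	_ = &http.Client{Transport: tr}
	fmt.Println("transport configured for tcp://localhost:2375")
}
```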

View File

@@ -1,15 +0,0 @@
// +build !windows
package sockets
import (
"net"
"syscall"
"time"
)
// DialPipe connects to a Windows named pipe.
// This is not supported on other OSes.
func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
return nil, syscall.EAFNOSUPPORT
}

View File

@@ -1,13 +0,0 @@
package sockets
import (
"net"
"time"
"github.com/Microsoft/go-winio"
)
// DialPipe connects to a Windows named pipe.
func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
return winio.DialPipe(addr, &timeout)
}

View File

@@ -1,22 +0,0 @@
// Package sockets provides helper functions to create and configure Unix or TCP sockets.
package sockets
import (
"crypto/tls"
"net"
)
// NewTCPSocket creates a TCP socket listener with the specified address
// and the specified tls configuration. If TLSConfig is set, it wraps the
// TCP listener inside a TLS one.
func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
l, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
if tlsConfig != nil {
tlsConfig.NextProtos = []string{"http/1.1"}
l = tls.NewListener(l, tlsConfig)
}
return l, nil
}
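
A sketch of serving HTTP over NewTCPSocket; a nil tls.Config means a plain TCP listener, and the listen address is illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// Pass a *tls.Config instead of nil to wrap the listener in TLS.
	l, err := sockets.NewTCPSocket("127.0.0.1:8080", nil)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	}))
}
```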

View File

@@ -1,80 +0,0 @@
// +build linux freebsd solaris
package sockets
import (
"fmt"
"net"
"os"
"strconv"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/opencontainers/runc/libcontainer/user"
)
// NewUnixSocket creates a unix socket with the specified path and group.
func NewUnixSocket(path, group string) (net.Listener, error) {
if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
return nil, err
}
mask := syscall.Umask(0777)
defer syscall.Umask(mask)
l, err := net.Listen("unix", path)
if err != nil {
return nil, err
}
if err := setSocketGroup(path, group); err != nil {
l.Close()
return nil, err
}
if err := os.Chmod(path, 0660); err != nil {
l.Close()
return nil, err
}
return l, nil
}
func setSocketGroup(path, group string) error {
if group == "" {
return nil
}
if err := changeGroup(path, group); err != nil {
if group != "docker" {
return err
}
logrus.Debugf("Warning: could not change group %s to docker: %v", path, err)
}
return nil
}
func changeGroup(path string, nameOrGid string) error {
gid, err := lookupGidByName(nameOrGid)
if err != nil {
return err
}
logrus.Debugf("%s group found. gid: %d", nameOrGid, gid)
return os.Chown(path, 0, gid)
}
func lookupGidByName(nameOrGid string) (int, error) {
groupFile, err := user.GetGroupPath()
if err != nil {
return -1, err
}
groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
})
if err != nil {
return -1, err
}
if len(groups) > 0 {
return groups[0].Gid, nil
}
gid, err := strconv.Atoi(nameOrGid)
if err == nil {
logrus.Warnf("Could not find GID %d", gid)
return gid, nil
}
return -1, fmt.Errorf("Group %s not found", nameOrGid)
}

View File

@@ -1,122 +0,0 @@
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
// A Config may be reused; the tls package will also not modify it.
package tlsconfig
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"github.com/Sirupsen/logrus"
)
// Options represents the information needed to create client and server TLS configurations.
type Options struct {
CAFile string
// If either CertFile or KeyFile is empty, Client() will not load them
// preventing the client from authenticating to the server.
// However, Server() requires them and will error out if they are empty.
CertFile string
KeyFile string
// client-only option
InsecureSkipVerify bool
// server-only option
ClientAuth tls.ClientAuthType
}
// Extra (server-side) accepted CBC cipher suites - will phase out in the future
var acceptedCBCCiphers = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
}
// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
// options struct but wants to use a commonly accepted set of TLS cipher suites, with
// known weak algorithms removed.
var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
// ServerDefault is a secure-enough TLS configuration for the server TLS configuration.
var ServerDefault = tls.Config{
// Avoid fallback to SSL protocols < TLS1.0
MinVersion: tls.VersionTLS10,
PreferServerCipherSuites: true,
CipherSuites: DefaultServerAcceptedCiphers,
}
// ClientDefault is a secure-enough TLS configuration for the client TLS configuration.
var ClientDefault = tls.Config{
// Prefer TLS1.2 as the client minimum
MinVersion: tls.VersionTLS12,
CipherSuites: clientCipherSuites,
}
// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
func certPool(caFile string) (*x509.CertPool, error) {
// If we should verify the server, we need to load a trusted ca
certPool := x509.NewCertPool()
pem, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err)
}
if !certPool.AppendCertsFromPEM(pem) {
return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
}
logrus.Debugf("Trusting %d certs", len(certPool.Subjects()))
return certPool, nil
}
// Client returns a TLS configuration meant to be used by a client.
func Client(options Options) (*tls.Config, error) {
tlsConfig := ClientDefault
tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
if !options.InsecureSkipVerify && options.CAFile != "" {
CAs, err := certPool(options.CAFile)
if err != nil {
return nil, err
}
tlsConfig.RootCAs = CAs
}
if options.CertFile != "" || options.KeyFile != "" {
tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
if err != nil {
return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err)
}
tlsConfig.Certificates = []tls.Certificate{tlsCert}
}
return &tlsConfig, nil
}
// Server returns a TLS configuration meant to be used by a server.
func Server(options Options) (*tls.Config, error) {
tlsConfig := ServerDefault
tlsConfig.ClientAuth = options.ClientAuth
tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
if err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
}
return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
}
tlsConfig.Certificates = []tls.Certificate{tlsCert}
if options.ClientAuth >= tls.VerifyClientCertIfGiven {
CAs, err := certPool(options.CAFile)
if err != nil {
return nil, err
}
tlsConfig.ClientCAs = CAs
}
return &tlsConfig, nil
}
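
For context, a minimal sketch of how a caller might use the Client and Server helpers from the package deleted above. The import path, listen address, and certificate file names are assumptions for illustration only; nothing here is shipped by this commit.

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	// Assumed vendor path; adjust to wherever this tlsconfig package lives in your tree.
	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Server side: cert and key are required, and clients must present a
	// certificate signed by ca.pem (all file names are placeholders).
	serverCfg, err := tlsconfig.Server(tlsconfig.Options{
		CAFile:     "ca.pem",
		CertFile:   "server-cert.pem",
		KeyFile:    "server-key.pem",
		ClientAuth: tls.RequireAndVerifyClientCert,
	})
	if err != nil {
		log.Fatal(err)
	}
	srv := &http.Server{Addr: ":8443", TLSConfig: serverCfg}
	_ = srv // srv.ListenAndServeTLS("", "") would serve with the config above

	// Client side: verify the server against the same CA and present a
	// client certificate for mutual TLS.
	clientCfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "ca.pem",
		CertFile: "client-cert.pem",
		KeyFile:  "client-key.pem",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = &http.Client{Transport: &http.Transport{TLSClientConfig: clientCfg}}
}
```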

View File

@@ -1,17 +0,0 @@
// +build go1.5
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
package tlsconfig
import (
"crypto/tls"
)
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
var clientCipherSuites = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}

View File

@@ -1,15 +0,0 @@
// +build !go1.5
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
package tlsconfig
import (
"crypto/tls"
)
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
var clientCipherSuites = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}

14
tests/smoke/vendor/github.com/gogo/protobuf/AUTHORS generated vendored Normal file
View File

@@ -0,0 +1,14 @@
# This is the official list of GoGo authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS file, which
# lists people. For example, employees are listed in CONTRIBUTORS,
# but not in AUTHORS, because the employer holds the copyright.
# Names should be added to this file as one of
# Organization's name
# Individual's name <submission email address>
# Individual's name <submission email address> <email2> <emailN>
# Please keep the list sorted.
Vastech SA (PTY) LTD
Walter Schulze <awalterschulze@gmail.com>

View File

@@ -0,0 +1,18 @@
Anton Povarov <anton.povarov@gmail.com>
Clayton Coleman <ccoleman@redhat.com>
Denis Smirnov <denis.smirnov.91@gmail.com>
DongYun Kang <ceram1000@gmail.com>
Dwayne Schultz <dschultz@pivotal.io>
Georg Apitz <gapitz@pivotal.io>
Gustav Paul <gustav.paul@gmail.com>
Johan Brandhorst <johan.brandhorst@gmail.com>
John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
Sergio Arbeo <serabe@gmail.com>
Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com>
Tormod Erevik Lea <tormodlea@gmail.com>
Walter Schulze <awalterschulze@gmail.com>

View File

@@ -0,0 +1,5 @@
The contributors to the Go protobuf repository:
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View File

@@ -1,160 +0,0 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groupcache
import (
"bytes"
"errors"
"io"
"strings"
)
// A ByteView holds an immutable view of bytes.
// Internally it wraps either a []byte or a string,
// but that detail is invisible to callers.
//
// A ByteView is meant to be used as a value type, not
// a pointer (like a time.Time).
type ByteView struct {
// If b is non-nil, b is used, else s is used.
b []byte
s string
}
// Len returns the view's length.
func (v ByteView) Len() int {
if v.b != nil {
return len(v.b)
}
return len(v.s)
}
// ByteSlice returns a copy of the data as a byte slice.
func (v ByteView) ByteSlice() []byte {
if v.b != nil {
return cloneBytes(v.b)
}
return []byte(v.s)
}
// String returns the data as a string, making a copy if necessary.
func (v ByteView) String() string {
if v.b != nil {
return string(v.b)
}
return v.s
}
// At returns the byte at index i.
func (v ByteView) At(i int) byte {
if v.b != nil {
return v.b[i]
}
return v.s[i]
}
// Slice slices the view between the provided from and to indices.
func (v ByteView) Slice(from, to int) ByteView {
if v.b != nil {
return ByteView{b: v.b[from:to]}
}
return ByteView{s: v.s[from:to]}
}
// SliceFrom slices the view from the provided index until the end.
func (v ByteView) SliceFrom(from int) ByteView {
if v.b != nil {
return ByteView{b: v.b[from:]}
}
return ByteView{s: v.s[from:]}
}
// Copy copies the view's bytes into dest and returns the number of bytes copied.
func (v ByteView) Copy(dest []byte) int {
if v.b != nil {
return copy(dest, v.b)
}
return copy(dest, v.s)
}
// Equal returns whether the bytes in v are the same as the bytes in
// b2.
func (v ByteView) Equal(b2 ByteView) bool {
if b2.b == nil {
return v.EqualString(b2.s)
}
return v.EqualBytes(b2.b)
}
// EqualString returns whether the bytes in v are the same as the bytes
// in s.
func (v ByteView) EqualString(s string) bool {
if v.b == nil {
return v.s == s
}
l := v.Len()
if len(s) != l {
return false
}
for i, bi := range v.b {
if bi != s[i] {
return false
}
}
return true
}
// EqualBytes returns whether the bytes in v are the same as the bytes
// in b2.
func (v ByteView) EqualBytes(b2 []byte) bool {
if v.b != nil {
return bytes.Equal(v.b, b2)
}
l := v.Len()
if len(b2) != l {
return false
}
for i, bi := range b2 {
if bi != v.s[i] {
return false
}
}
return true
}
// Reader returns an io.ReadSeeker for the bytes in v.
func (v ByteView) Reader() io.ReadSeeker {
if v.b != nil {
return bytes.NewReader(v.b)
}
return strings.NewReader(v.s)
}
// ReadAt implements io.ReaderAt on the bytes in v.
func (v ByteView) ReadAt(p []byte, off int64) (n int, err error) {
if off < 0 {
return 0, errors.New("view: invalid offset")
}
if off >= int64(v.Len()) {
return 0, io.EOF
}
n = v.SliceFrom(int(off)).Copy(p)
if n < len(p) {
err = io.EOF
}
return
}
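
A short sketch of ByteView's value semantics, written as if it sat inside package groupcache (the b and s fields are unexported, so an external caller would obtain a view via ByteViewSink instead); the payloads are made up for illustration.

```go
package groupcache

import (
	"fmt"
	"io/ioutil"
)

// exampleByteView shows that a view backed by a []byte and a view backed
// by a string behave identically through the exported accessors.
func exampleByteView() {
	fromBytes := ByteView{b: []byte("hello world")}
	fromString := ByteView{s: "hello world"}

	fmt.Println(fromBytes.Equal(fromString)) // true: same contents
	fmt.Println(fromBytes.Len())             // 11, without copying

	// Slicing narrows the view without copying the underlying data.
	fmt.Println(fromBytes.Slice(0, 5).String()) // "hello"

	// Reader exposes the same bytes as a standard io.ReadSeeker.
	rest, _ := ioutil.ReadAll(fromString.SliceFrom(6).Reader())
	fmt.Println(string(rest)) // "world"
}
```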

View File

@@ -1,489 +0,0 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package groupcache provides a data loading mechanism with caching
// and de-duplication that works across a set of peer processes.
//
// Each data Get first consults its local cache, otherwise delegates
// to the requested key's canonical owner, which then checks its cache
// or finally gets the data. In the common case, many concurrent
// cache misses across a set of peers for the same key result in just
// one cache fill.
package groupcache
import (
"errors"
"math/rand"
"strconv"
"sync"
"sync/atomic"
pb "github.com/golang/groupcache/groupcachepb"
"github.com/golang/groupcache/lru"
"github.com/golang/groupcache/singleflight"
)
// A Getter loads data for a key.
type Getter interface {
// Get returns the value identified by key, populating dest.
//
// The returned data must be unversioned. That is, key must
// uniquely describe the loaded data, without an implicit
// current time, and without relying on cache expiration
// mechanisms.
Get(ctx Context, key string, dest Sink) error
}
// A GetterFunc implements Getter with a function.
type GetterFunc func(ctx Context, key string, dest Sink) error
func (f GetterFunc) Get(ctx Context, key string, dest Sink) error {
return f(ctx, key, dest)
}
var (
mu sync.RWMutex
groups = make(map[string]*Group)
initPeerServerOnce sync.Once
initPeerServer func()
)
// GetGroup returns the named group previously created with NewGroup, or
// nil if there's no such group.
func GetGroup(name string) *Group {
mu.RLock()
g := groups[name]
mu.RUnlock()
return g
}
// NewGroup creates a coordinated group-aware Getter from a Getter.
//
// The returned Getter tries (but does not guarantee) to run only one
// Get call at once for a given key across an entire set of peer
// processes. Concurrent callers both in the local process and in
// other processes receive copies of the answer once the original Get
// completes.
//
// The group name must be unique for each getter.
func NewGroup(name string, cacheBytes int64, getter Getter) *Group {
return newGroup(name, cacheBytes, getter, nil)
}
// If peers is nil, the peerPicker is called via a sync.Once to initialize it.
func newGroup(name string, cacheBytes int64, getter Getter, peers PeerPicker) *Group {
if getter == nil {
panic("nil Getter")
}
mu.Lock()
defer mu.Unlock()
initPeerServerOnce.Do(callInitPeerServer)
if _, dup := groups[name]; dup {
panic("duplicate registration of group " + name)
}
g := &Group{
name: name,
getter: getter,
peers: peers,
cacheBytes: cacheBytes,
loadGroup: &singleflight.Group{},
}
if fn := newGroupHook; fn != nil {
fn(g)
}
groups[name] = g
return g
}
// newGroupHook, if non-nil, is called right after a new group is created.
var newGroupHook func(*Group)
// RegisterNewGroupHook registers a hook that is run each time
// a group is created.
func RegisterNewGroupHook(fn func(*Group)) {
if newGroupHook != nil {
panic("RegisterNewGroupHook called more than once")
}
newGroupHook = fn
}
// RegisterServerStart registers a hook that is run when the first
// group is created.
func RegisterServerStart(fn func()) {
if initPeerServer != nil {
panic("RegisterServerStart called more than once")
}
initPeerServer = fn
}
func callInitPeerServer() {
if initPeerServer != nil {
initPeerServer()
}
}
// A Group is a cache namespace whose associated data is loaded and spread
// over a group of 1 or more machines.
type Group struct {
name string
getter Getter
peersOnce sync.Once
peers PeerPicker
cacheBytes int64 // limit for sum of mainCache and hotCache size
// mainCache is a cache of the keys for which this process
// (amongst its peers) is authoritative. That is, this cache
// contains keys which consistent hash on to this process's
// peer number.
mainCache cache
// hotCache contains keys/values for which this peer is not
// authoritative (otherwise they would be in mainCache), but
// are popular enough to warrant mirroring in this process to
// avoid going over the network to fetch from a peer. Having
// a hotCache avoids network hotspotting, where a peer's
// network card could become the bottleneck on a popular key.
// This cache is used sparingly to maximize the total number
// of key/value pairs that can be stored globally.
hotCache cache
// loadGroup ensures that each key is only fetched once
// (either locally or remotely), regardless of the number of
// concurrent callers.
loadGroup flightGroup
// Stats are statistics on the group.
Stats Stats
}
// flightGroup is defined as an interface which flightgroup.Group
// satisfies. We define this so that we may test with an alternate
// implementation.
type flightGroup interface {
// Done is called when Do is done.
Do(key string, fn func() (interface{}, error)) (interface{}, error)
}
// Stats are per-group statistics.
type Stats struct {
Gets AtomicInt // any Get request, including from peers
CacheHits AtomicInt // a hit in either the main or hot cache
PeerLoads AtomicInt // either remote load or remote cache hit (not an error)
PeerErrors AtomicInt
Loads AtomicInt // (gets - cacheHits)
LoadsDeduped AtomicInt // after singleflight
LocalLoads AtomicInt // total good local loads
LocalLoadErrs AtomicInt // total bad local loads
ServerRequests AtomicInt // gets that came over the network from peers
}
// Name returns the name of the group.
func (g *Group) Name() string {
return g.name
}
func (g *Group) initPeers() {
if g.peers == nil {
g.peers = getPeers()
}
}
func (g *Group) Get(ctx Context, key string, dest Sink) error {
g.peersOnce.Do(g.initPeers)
g.Stats.Gets.Add(1)
if dest == nil {
return errors.New("groupcache: nil dest Sink")
}
value, cacheHit := g.lookupCache(key)
if cacheHit {
g.Stats.CacheHits.Add(1)
return setSinkView(dest, value)
}
// Optimization to avoid double unmarshalling or copying: keep
// track of whether the dest was already populated. One caller
// (if local) will set this; the losers will not. The common
// case will likely be one caller.
destPopulated := false
value, destPopulated, err := g.load(ctx, key, dest)
if err != nil {
return err
}
if destPopulated {
return nil
}
return setSinkView(dest, value)
}
// load loads key either by invoking the getter locally or by sending it to another machine.
func (g *Group) load(ctx Context, key string, dest Sink) (value ByteView, destPopulated bool, err error) {
g.Stats.Loads.Add(1)
viewi, err := g.loadGroup.Do(key, func() (interface{}, error) {
// Check the cache again because singleflight can only dedup calls
// that overlap concurrently. It's possible for 2 concurrent
// requests to miss the cache, resulting in 2 load() calls. An
// unfortunate goroutine scheduling would result in this callback
// being run twice, serially. If we don't check the cache again,
// cache.nbytes would be incremented below even though there will
// be only one entry for this key.
//
// Consider the following serialized event ordering for two
// goroutines in which this callback gets called twice for the
// same key:
// 1: Get("key")
// 2: Get("key")
// 1: lookupCache("key")
// 2: lookupCache("key")
// 1: load("key")
// 2: load("key")
// 1: loadGroup.Do("key", fn)
// 1: fn()
// 2: loadGroup.Do("key", fn)
// 2: fn()
if value, cacheHit := g.lookupCache(key); cacheHit {
g.Stats.CacheHits.Add(1)
return value, nil
}
g.Stats.LoadsDeduped.Add(1)
var value ByteView
var err error
if peer, ok := g.peers.PickPeer(key); ok {
value, err = g.getFromPeer(ctx, peer, key)
if err == nil {
g.Stats.PeerLoads.Add(1)
return value, nil
}
g.Stats.PeerErrors.Add(1)
// TODO(bradfitz): log the peer's error? keep
// log of the past few for /groupcachez? It's
// probably boring (normal task movement), so not
// worth logging I imagine.
}
value, err = g.getLocally(ctx, key, dest)
if err != nil {
g.Stats.LocalLoadErrs.Add(1)
return nil, err
}
g.Stats.LocalLoads.Add(1)
destPopulated = true // only one caller of load gets this return value
g.populateCache(key, value, &g.mainCache)
return value, nil
})
if err == nil {
value = viewi.(ByteView)
}
return
}
func (g *Group) getLocally(ctx Context, key string, dest Sink) (ByteView, error) {
err := g.getter.Get(ctx, key, dest)
if err != nil {
return ByteView{}, err
}
return dest.view()
}
func (g *Group) getFromPeer(ctx Context, peer ProtoGetter, key string) (ByteView, error) {
req := &pb.GetRequest{
Group: &g.name,
Key: &key,
}
res := &pb.GetResponse{}
err := peer.Get(ctx, req, res)
if err != nil {
return ByteView{}, err
}
value := ByteView{b: res.Value}
// TODO(bradfitz): use res.MinuteQps or something smart to
// conditionally populate hotCache. For now just do it some
// percentage of the time.
if rand.Intn(10) == 0 {
g.populateCache(key, value, &g.hotCache)
}
return value, nil
}
func (g *Group) lookupCache(key string) (value ByteView, ok bool) {
if g.cacheBytes <= 0 {
return
}
value, ok = g.mainCache.get(key)
if ok {
return
}
value, ok = g.hotCache.get(key)
return
}
func (g *Group) populateCache(key string, value ByteView, cache *cache) {
if g.cacheBytes <= 0 {
return
}
cache.add(key, value)
// Evict items from cache(s) if necessary.
for {
mainBytes := g.mainCache.bytes()
hotBytes := g.hotCache.bytes()
if mainBytes+hotBytes <= g.cacheBytes {
return
}
// TODO(bradfitz): this is good-enough-for-now logic.
// It should be something based on measurements and/or
// respecting the costs of different resources.
victim := &g.mainCache
if hotBytes > mainBytes/8 {
victim = &g.hotCache
}
victim.removeOldest()
}
}
// CacheType represents a type of cache.
type CacheType int
const (
// The MainCache is the cache for items that this peer is the
// owner for.
MainCache CacheType = iota + 1
// The HotCache is the cache for items that seem popular
// enough to replicate to this node, even though it's not the
// owner.
HotCache
)
// CacheStats returns stats about the provided cache within the group.
func (g *Group) CacheStats(which CacheType) CacheStats {
switch which {
case MainCache:
return g.mainCache.stats()
case HotCache:
return g.hotCache.stats()
default:
return CacheStats{}
}
}
// cache is a wrapper around an *lru.Cache that adds synchronization,
// makes values always be ByteView, and counts the size of all keys and
// values.
type cache struct {
mu sync.RWMutex
nbytes int64 // of all keys and values
lru *lru.Cache
nhit, nget int64
nevict int64 // number of evictions
}
func (c *cache) stats() CacheStats {
c.mu.RLock()
defer c.mu.RUnlock()
return CacheStats{
Bytes: c.nbytes,
Items: c.itemsLocked(),
Gets: c.nget,
Hits: c.nhit,
Evictions: c.nevict,
}
}
func (c *cache) add(key string, value ByteView) {
c.mu.Lock()
defer c.mu.Unlock()
if c.lru == nil {
c.lru = &lru.Cache{
OnEvicted: func(key lru.Key, value interface{}) {
val := value.(ByteView)
c.nbytes -= int64(len(key.(string))) + int64(val.Len())
c.nevict++
},
}
}
c.lru.Add(key, value)
c.nbytes += int64(len(key)) + int64(value.Len())
}
func (c *cache) get(key string) (value ByteView, ok bool) {
c.mu.Lock()
defer c.mu.Unlock()
c.nget++
if c.lru == nil {
return
}
vi, ok := c.lru.Get(key)
if !ok {
return
}
c.nhit++
return vi.(ByteView), true
}
func (c *cache) removeOldest() {
c.mu.Lock()
defer c.mu.Unlock()
if c.lru != nil {
c.lru.RemoveOldest()
}
}
func (c *cache) bytes() int64 {
c.mu.RLock()
defer c.mu.RUnlock()
return c.nbytes
}
func (c *cache) items() int64 {
c.mu.RLock()
defer c.mu.RUnlock()
return c.itemsLocked()
}
func (c *cache) itemsLocked() int64 {
if c.lru == nil {
return 0
}
return int64(c.lru.Len())
}
// An AtomicInt is an int64 to be accessed atomically.
type AtomicInt int64
// Add atomically adds n to i.
func (i *AtomicInt) Add(n int64) {
atomic.AddInt64((*int64)(i), n)
}
// Get atomically gets the value of i.
func (i *AtomicInt) Get() int64 {
return atomic.LoadInt64((*int64)(i))
}
func (i *AtomicInt) String() string {
return strconv.FormatInt(i.Get(), 10)
}
// CacheStats are returned by stats accessors on Group.
type CacheStats struct {
Bytes int64
Items int64
Gets int64
Hits int64
Evictions int64
}
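
A minimal, self-contained sketch of the Getter/Group flow described above; the group name, key, and 64 MiB cache budget are arbitrary choices for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/groupcache"
)

func main() {
	// The Getter is the slow path: it runs only on a cache miss, and
	// singleflight collapses concurrent misses for the same key into one call.
	getter := groupcache.GetterFunc(func(_ groupcache.Context, key string, dest groupcache.Sink) error {
		return dest.SetString("value-for-" + key)
	})

	// 64 MiB shared between the group's main and hot caches.
	group := groupcache.NewGroup("example", 64<<20, getter)

	var value string
	if err := group.Get(nil, "some-key", groupcache.StringSink(&value)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(value) // "value-for-some-key"
}
```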

View File

@@ -1,227 +0,0 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groupcache
import (
"bytes"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"sync"
"github.com/golang/groupcache/consistenthash"
pb "github.com/golang/groupcache/groupcachepb"
"github.com/golang/protobuf/proto"
)
const defaultBasePath = "/_groupcache/"
const defaultReplicas = 50
// HTTPPool implements PeerPicker for a pool of HTTP peers.
type HTTPPool struct {
// Context optionally specifies a context for the server to use when it
// receives a request.
// If nil, the server uses a nil Context.
Context func(*http.Request) Context
// Transport optionally specifies an http.RoundTripper for the client
// to use when it makes a request.
// If nil, the client uses http.DefaultTransport.
Transport func(Context) http.RoundTripper
// this peer's base URL, e.g. "https://example.net:8000"
self string
// opts specifies the options.
opts HTTPPoolOptions
mu sync.Mutex // guards peers and httpGetters
peers *consistenthash.Map
httpGetters map[string]*httpGetter // keyed by e.g. "http://10.0.0.2:8008"
}
// HTTPPoolOptions are the configurations of an HTTPPool.
type HTTPPoolOptions struct {
// BasePath specifies the HTTP path that will serve groupcache requests.
// If blank, it defaults to "/_groupcache/".
BasePath string
// Replicas specifies the number of key replicas on the consistent hash.
// If blank, it defaults to 50.
Replicas int
// HashFn specifies the hash function of the consistent hash.
// If blank, it defaults to crc32.ChecksumIEEE.
HashFn consistenthash.Hash
}
// NewHTTPPool initializes an HTTP pool of peers, and registers itself as a PeerPicker.
// For convenience, it also registers itself as an http.Handler with http.DefaultServeMux.
// The self argument should be a valid base URL that points to the current server,
// for example "http://example.net:8000".
func NewHTTPPool(self string) *HTTPPool {
p := NewHTTPPoolOpts(self, nil)
http.Handle(p.opts.BasePath, p)
return p
}
var httpPoolMade bool
// NewHTTPPoolOpts initializes an HTTP pool of peers with the given options.
// Unlike NewHTTPPool, this function does not register the created pool as an HTTP handler.
// The returned *HTTPPool implements http.Handler and must be registered using http.Handle.
func NewHTTPPoolOpts(self string, o *HTTPPoolOptions) *HTTPPool {
if httpPoolMade {
panic("groupcache: NewHTTPPool must be called only once")
}
httpPoolMade = true
p := &HTTPPool{
self: self,
httpGetters: make(map[string]*httpGetter),
}
if o != nil {
p.opts = *o
}
if p.opts.BasePath == "" {
p.opts.BasePath = defaultBasePath
}
if p.opts.Replicas == 0 {
p.opts.Replicas = defaultReplicas
}
p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn)
RegisterPeerPicker(func() PeerPicker { return p })
return p
}
// Set updates the pool's list of peers.
// Each peer value should be a valid base URL,
// for example "http://example.net:8000".
func (p *HTTPPool) Set(peers ...string) {
p.mu.Lock()
defer p.mu.Unlock()
p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn)
p.peers.Add(peers...)
p.httpGetters = make(map[string]*httpGetter, len(peers))
for _, peer := range peers {
p.httpGetters[peer] = &httpGetter{transport: p.Transport, baseURL: peer + p.opts.BasePath}
}
}
func (p *HTTPPool) PickPeer(key string) (ProtoGetter, bool) {
p.mu.Lock()
defer p.mu.Unlock()
if p.peers.IsEmpty() {
return nil, false
}
if peer := p.peers.Get(key); peer != p.self {
return p.httpGetters[peer], true
}
return nil, false
}
func (p *HTTPPool) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Parse request.
if !strings.HasPrefix(r.URL.Path, p.opts.BasePath) {
panic("HTTPPool serving unexpected path: " + r.URL.Path)
}
parts := strings.SplitN(r.URL.Path[len(p.opts.BasePath):], "/", 2)
if len(parts) != 2 {
http.Error(w, "bad request", http.StatusBadRequest)
return
}
groupName := parts[0]
key := parts[1]
// Fetch the value for this group/key.
group := GetGroup(groupName)
if group == nil {
http.Error(w, "no such group: "+groupName, http.StatusNotFound)
return
}
var ctx Context
if p.Context != nil {
ctx = p.Context(r)
}
group.Stats.ServerRequests.Add(1)
var value []byte
err := group.Get(ctx, key, AllocatingByteSliceSink(&value))
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Write the value to the response body as a proto message.
body, err := proto.Marshal(&pb.GetResponse{Value: value})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/x-protobuf")
w.Write(body)
}
type httpGetter struct {
transport func(Context) http.RoundTripper
baseURL string
}
var bufferPool = sync.Pool{
New: func() interface{} { return new(bytes.Buffer) },
}
func (h *httpGetter) Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error {
u := fmt.Sprintf(
"%v%v/%v",
h.baseURL,
url.QueryEscape(in.GetGroup()),
url.QueryEscape(in.GetKey()),
)
req, err := http.NewRequest("GET", u, nil)
if err != nil {
return err
}
tr := http.DefaultTransport
if h.transport != nil {
tr = h.transport(context)
}
res, err := tr.RoundTrip(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return fmt.Errorf("server returned: %v", res.Status)
}
b := bufferPool.Get().(*bytes.Buffer)
b.Reset()
defer bufferPool.Put(b)
_, err = io.Copy(b, res.Body)
if err != nil {
return fmt.Errorf("reading response body: %v", err)
}
err = proto.Unmarshal(b.Bytes(), out)
if err != nil {
return fmt.Errorf("decoding response body: %v", err)
}
return nil
}
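
A sketch of wiring a process into an HTTP peer set with the pool type above; the peer addresses and port are placeholders.

```go
package main

import (
	"log"
	"net/http"

	"github.com/golang/groupcache"
)

func main() {
	// NewHTTPPool registers this process as the PeerPicker for all groups
	// and installs itself on http.DefaultServeMux under /_groupcache/.
	self := "http://10.0.0.1:8080"
	pool := groupcache.NewHTTPPool(self)

	// The full peer list, including this process; each entry is a base URL.
	pool.Set("http://10.0.0.1:8080", "http://10.0.0.2:8080", "http://10.0.0.3:8080")

	// Serving the default mux is enough for peers to fetch keys owned here.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```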

View File

@@ -1,71 +0,0 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// peers.go defines how processes find and communicate with their peers.
package groupcache
import (
pb "github.com/golang/groupcache/groupcachepb"
)
// Context is an opaque value passed through calls to the
// ProtoGetter. It may be nil if your ProtoGetter implementation does
// not require a context.
type Context interface{}
// ProtoGetter is the interface that must be implemented by a peer.
type ProtoGetter interface {
Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error
}
// PeerPicker is the interface that must be implemented to locate
// the peer that owns a specific key.
type PeerPicker interface {
// PickPeer returns the peer that owns the specific key
// and true to indicate that a remote peer was nominated.
// It returns nil, false if the key owner is the current peer.
PickPeer(key string) (peer ProtoGetter, ok bool)
}
// NoPeers is an implementation of PeerPicker that never finds a peer.
type NoPeers struct{}
func (NoPeers) PickPeer(key string) (peer ProtoGetter, ok bool) { return }
var (
portPicker func() PeerPicker
)
// RegisterPeerPicker registers the peer initialization function.
// It is called once, when the first group is created.
func RegisterPeerPicker(fn func() PeerPicker) {
if portPicker != nil {
panic("RegisterPeerPicker called more than once")
}
portPicker = fn
}
func getPeers() PeerPicker {
if portPicker == nil {
return NoPeers{}
}
pk := portPicker()
if pk == nil {
pk = NoPeers{}
}
return pk
}

View File

@@ -1,322 +0,0 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groupcache
import (
"errors"
"github.com/golang/protobuf/proto"
)
// A Sink receives data from a Get call.
//
// Implementations of Getter must call exactly one of the Set methods
// on success.
type Sink interface {
// SetString sets the value to s.
SetString(s string) error
// SetBytes sets the value to the contents of v.
// The caller retains ownership of v.
SetBytes(v []byte) error
// SetProto sets the value to the encoded version of m.
// The caller retains ownership of m.
SetProto(m proto.Message) error
// view returns a frozen view of the bytes for caching.
view() (ByteView, error)
}
func cloneBytes(b []byte) []byte {
c := make([]byte, len(b))
copy(c, b)
return c
}
func setSinkView(s Sink, v ByteView) error {
// A viewSetter is a Sink that can also receive its value from
// a ByteView. This is a fast path to minimize copies when the
// item was already cached locally in memory (where it's
// cached as a ByteView)
type viewSetter interface {
setView(v ByteView) error
}
if vs, ok := s.(viewSetter); ok {
return vs.setView(v)
}
if v.b != nil {
return s.SetBytes(v.b)
}
return s.SetString(v.s)
}
// StringSink returns a Sink that populates the provided string pointer.
func StringSink(sp *string) Sink {
return &stringSink{sp: sp}
}
type stringSink struct {
sp *string
v ByteView
// TODO(bradfitz): track whether any Sets were called.
}
func (s *stringSink) view() (ByteView, error) {
// TODO(bradfitz): return an error if no Set was called
return s.v, nil
}
func (s *stringSink) SetString(v string) error {
s.v.b = nil
s.v.s = v
*s.sp = v
return nil
}
func (s *stringSink) SetBytes(v []byte) error {
return s.SetString(string(v))
}
func (s *stringSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
s.v.b = b
*s.sp = string(b)
return nil
}
// ByteViewSink returns a Sink that populates a ByteView.
func ByteViewSink(dst *ByteView) Sink {
if dst == nil {
panic("nil dst")
}
return &byteViewSink{dst: dst}
}
type byteViewSink struct {
dst *ByteView
// if this code ever ends up tracking that at least one set*
// method was called, don't make it an error to call set
// methods multiple times. Lorry's payload.go does that, and
// it makes sense. The comment at the top of this file about
// "exactly one of the Set methods" is overly strict. We
// really care about at least once (in a handler), but if
// multiple handlers fail (or multiple functions in a program
// using a Sink), it's okay to re-use the same one.
}
func (s *byteViewSink) setView(v ByteView) error {
*s.dst = v
return nil
}
func (s *byteViewSink) view() (ByteView, error) {
return *s.dst, nil
}
func (s *byteViewSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
*s.dst = ByteView{b: b}
return nil
}
func (s *byteViewSink) SetBytes(b []byte) error {
*s.dst = ByteView{b: cloneBytes(b)}
return nil
}
func (s *byteViewSink) SetString(v string) error {
*s.dst = ByteView{s: v}
return nil
}
// ProtoSink returns a sink that unmarshals binary proto values into m.
func ProtoSink(m proto.Message) Sink {
return &protoSink{
dst: m,
}
}
type protoSink struct {
dst proto.Message // authoritative value
typ string
v ByteView // encoded
}
func (s *protoSink) view() (ByteView, error) {
return s.v, nil
}
func (s *protoSink) SetBytes(b []byte) error {
err := proto.Unmarshal(b, s.dst)
if err != nil {
return err
}
s.v.b = cloneBytes(b)
s.v.s = ""
return nil
}
func (s *protoSink) SetString(v string) error {
b := []byte(v)
err := proto.Unmarshal(b, s.dst)
if err != nil {
return err
}
s.v.b = b
s.v.s = ""
return nil
}
func (s *protoSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
// TODO(bradfitz): optimize for same-task case more and write
// right through? would need to document ownership rules at
// the same time. but then we could just assign *dst = *m
// here. This works for now:
err = proto.Unmarshal(b, s.dst)
if err != nil {
return err
}
s.v.b = b
s.v.s = ""
return nil
}
// AllocatingByteSliceSink returns a Sink that allocates
// a byte slice to hold the received value and assigns
// it to *dst. The memory is not retained by groupcache.
func AllocatingByteSliceSink(dst *[]byte) Sink {
return &allocBytesSink{dst: dst}
}
type allocBytesSink struct {
dst *[]byte
v ByteView
}
func (s *allocBytesSink) view() (ByteView, error) {
return s.v, nil
}
func (s *allocBytesSink) setView(v ByteView) error {
if v.b != nil {
*s.dst = cloneBytes(v.b)
} else {
*s.dst = []byte(v.s)
}
s.v = v
return nil
}
func (s *allocBytesSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
return s.setBytesOwned(b)
}
func (s *allocBytesSink) SetBytes(b []byte) error {
return s.setBytesOwned(cloneBytes(b))
}
func (s *allocBytesSink) setBytesOwned(b []byte) error {
if s.dst == nil {
return errors.New("nil AllocatingByteSliceSink *[]byte dst")
}
*s.dst = cloneBytes(b) // another copy, protecting the read-only s.v.b view
s.v.b = b
s.v.s = ""
return nil
}
func (s *allocBytesSink) SetString(v string) error {
if s.dst == nil {
return errors.New("nil AllocatingByteSliceSink *[]byte dst")
}
*s.dst = []byte(v)
s.v.b = nil
s.v.s = v
return nil
}
// TruncatingByteSliceSink returns a Sink that writes up to len(*dst)
// bytes to *dst. If more bytes are available, they're silently
// truncated. If fewer bytes are available than len(*dst), *dst
// is shrunk to fit the number of bytes available.
func TruncatingByteSliceSink(dst *[]byte) Sink {
return &truncBytesSink{dst: dst}
}
type truncBytesSink struct {
dst *[]byte
v ByteView
}
func (s *truncBytesSink) view() (ByteView, error) {
return s.v, nil
}
func (s *truncBytesSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
return s.setBytesOwned(b)
}
func (s *truncBytesSink) SetBytes(b []byte) error {
return s.setBytesOwned(cloneBytes(b))
}
func (s *truncBytesSink) setBytesOwned(b []byte) error {
if s.dst == nil {
return errors.New("nil TruncatingByteSliceSink *[]byte dst")
}
n := copy(*s.dst, b)
if n < len(*s.dst) {
*s.dst = (*s.dst)[:n]
}
s.v.b = b
s.v.s = ""
return nil
}
func (s *truncBytesSink) SetString(v string) error {
if s.dst == nil {
return errors.New("nil TruncatingByteSliceSink *[]byte dst")
}
n := copy(*s.dst, v)
if n < len(*s.dst) {
*s.dst = (*s.dst)[:n]
}
s.v.b = nil
s.v.s = v
return nil
}
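
To round out the Sink variants above, a small self-contained sketch that reads one made-up key through several sinks; the group name, cache size, and payload are assumptions for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/groupcache"
)

func main() {
	// A trivial group so the sinks have something to decode.
	g := groupcache.NewGroup("sink-demo", 1<<20, groupcache.GetterFunc(
		func(_ groupcache.Context, key string, dest groupcache.Sink) error {
			return dest.SetBytes([]byte("payload-for-" + key))
		}))

	var s string // StringSink copies the cached bytes into a string
	if err := g.Get(nil, "k", groupcache.StringSink(&s)); err != nil {
		log.Fatal(err)
	}

	var owned []byte // AllocatingByteSliceSink hands back a copy the caller owns
	if err := g.Get(nil, "k", groupcache.AllocatingByteSliceSink(&owned)); err != nil {
		log.Fatal(err)
	}

	buf := make([]byte, 8) // TruncatingByteSliceSink keeps at most len(buf) bytes
	if err := g.Get(nil, "k", groupcache.TruncatingByteSliceSink(&buf)); err != nil {
		log.Fatal(err)
	}

	var view groupcache.ByteView // ByteViewSink avoids an extra copy on cache hits
	if err := g.Get(nil, "k", groupcache.ByteViewSink(&view)); err != nil {
		log.Fatal(err)
	}

	fmt.Println(s, string(owned), string(buf), view.Len())
}
```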

Some files were not shown because too many files have changed in this diff.