Mirror of https://github.com/openshift/source-to-image.git (synced 2026-02-05 12:44:54 +01:00)
297 Godeps/Godeps.json generated
@@ -1,297 +0,0 @@
{
	"ImportPath": "github.com/openshift/source-to-image",
	"GoVersion": "go1.9",
	"GodepVersion": "v79",
	"Packages": [
		"golang.org/x/crypto/ssh/terminal",
		"./..."
	],
	"Deps": [
		{
			"ImportPath": "github.com/Azure/go-ansiterm",
			"Rev": "19f72df4d05d31cbe1c56bfc8045c96babff6c7e"
		},
		{
			"ImportPath": "github.com/Azure/go-ansiterm/winterm",
			"Rev": "19f72df4d05d31cbe1c56bfc8045c96babff6c7e"
		},
		{
			"ImportPath": "github.com/Microsoft/go-winio",
			"Comment": "v0.4.5",
			"Rev": "78439966b38d69bf38227fbf57ac8a6fee70f69a"
		},
		{
			"ImportPath": "github.com/Nvveen/Gotty",
			"Rev": "cd527374f1e5bff4938207604a14f2e38a9cf512"
		},
		{
			"ImportPath": "github.com/docker/distribution/digestset",
			"Comment": "v2.6.0-rc.1-209-gedc3ab29",
			"Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
		},
		{
			"ImportPath": "github.com/docker/distribution/reference",
			"Comment": "v2.6.0-rc.1-209-gedc3ab29",
			"Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
		},
		{
			"ImportPath": "github.com/docker/docker/api",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/blkiodev",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/container",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/events",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/filters",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/image",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/mount",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/network",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/registry",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/strslice",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/swarm",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/swarm/runtime",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/time",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/versions",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/api/types/volume",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/builder/dockerfile/command",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/builder/dockerfile/parser",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/cli/config",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/client",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/homedir",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/idtools",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/ioutils",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/jsonlog",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/jsonmessage",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/longpath",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/mount",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/stdcopy",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/system",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/term",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/term/windows",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/tlsconfig",
			"Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1",
			"Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756"
		},
		{
			"ImportPath": "github.com/docker/go-connections/nat",
			"Comment": "v0.3.0",
			"Rev": "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
		},
		{
			"ImportPath": "github.com/docker/go-connections/sockets",
			"Comment": "v0.3.0",
			"Rev": "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
		},
		{
			"ImportPath": "github.com/docker/go-connections/tlsconfig",
			"Comment": "v0.3.0",
			"Rev": "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
		},
		{
			"ImportPath": "github.com/docker/go-units",
			"Comment": "v0.3.1-11-g9e638d3",
			"Rev": "9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1"
		},
		{
			"ImportPath": "github.com/docker/libtrust",
			"Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a"
		},
		{
			"ImportPath": "github.com/gogo/protobuf/proto",
			"Comment": "v0.4-3-gc0656edd",
			"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
		},
		{
			"ImportPath": "github.com/golang/glog",
			"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
		},
		{
			"ImportPath": "github.com/inconshreveable/mousetrap",
			"Comment": "v1.0",
			"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
		},
		{
			"ImportPath": "github.com/opencontainers/go-digest",
			"Rev": "a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb"
		},
		{
			"ImportPath": "github.com/opencontainers/image-spec/specs-go",
			"Comment": "v1.0.0-rc6-12-g372ad78",
			"Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
		},
		{
			"ImportPath": "github.com/opencontainers/image-spec/specs-go/v1",
			"Comment": "v1.0.0-rc6-12-g372ad78",
			"Rev": "372ad780f63454fbbbbcc7cf80e5b90245c13e13"
		},
		{
			"ImportPath": "github.com/opencontainers/runc/libcontainer/user",
			"Comment": "v1.0.0-rc4-50-g4d6e6720",
			"Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120"
		},
		{
			"ImportPath": "github.com/pkg/errors",
			"Comment": "v0.7.0-13-ga221380",
			"Rev": "a22138067af1c4942683050411a841ade67fe1eb"
		},
		{
			"ImportPath": "github.com/sirupsen/logrus",
			"Comment": "v1.0.3-11-g89742ae",
			"Rev": "89742aefa4b206dcf400792f3bd35b542998eb3b"
		},
		{
			"ImportPath": "github.com/spf13/cobra",
			"Rev": "7c674d9e72017ed25f6d2b5e497a1368086b6a6f"
		},
		{
			"ImportPath": "github.com/spf13/pflag",
			"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
		},
		{
			"ImportPath": "golang.org/x/crypto/ssh/terminal",
			"Rev": "81e90905daefcd6fd217b62423c0908922eadb30"
		},
		{
			"ImportPath": "golang.org/x/net/context",
			"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
		},
		{
			"ImportPath": "golang.org/x/net/context/ctxhttp",
			"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
		},
		{
			"ImportPath": "golang.org/x/net/proxy",
			"Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f"
		},
		{
			"ImportPath": "golang.org/x/sys/unix",
			"Rev": "7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce"
		},
		{
			"ImportPath": "golang.org/x/sys/windows",
			"Rev": "7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce"
		}
	]
}
5 Godeps/Readme generated
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
105 glide.lock generated Normal file
@@ -0,0 +1,105 @@
hash: edca5c32b67b93cfc6992dfbd7b429f7e3e5c99929d0f7b156a7777132e19eb0
updated: 2018-03-08T15:16:10.063277504-05:00
imports:
- name: github.com/Azure/go-ansiterm
  version: 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
  subpackages:
  - winterm
- name: github.com/docker/distribution
  version: edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
  subpackages:
  - digestset
  - reference
- name: github.com/docker/docker
  version: 4f3616fb1c112e206b88cb7a9922bf49067a7756
  repo: git@github.com:openshift/moby-moby
  subpackages:
  - api
  - api/types
  - api/types/blkiodev
  - api/types/container
  - api/types/events
  - api/types/filters
  - api/types/image
  - api/types/mount
  - api/types/network
  - api/types/registry
  - api/types/strslice
  - api/types/swarm
  - api/types/swarm/runtime
  - api/types/time
  - api/types/versions
  - api/types/volume
  - builder/dockerfile/command
  - builder/dockerfile/parser
  - cli/config
  - client
  - pkg/homedir
  - pkg/idtools
  - pkg/ioutils
  - pkg/jsonlog
  - pkg/jsonmessage
  - pkg/longpath
  - pkg/mount
  - pkg/stdcopy
  - pkg/system
  - pkg/term
  - pkg/term/windows
  - pkg/tlsconfig
- name: github.com/docker/go-connections
  version: 3ede32e2033de7505e6500d6c868c2b9ed9f169d
  subpackages:
  - nat
  - sockets
  - tlsconfig
- name: github.com/docker/go-units
  version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
- name: github.com/docker/libtrust
  version: 9cbd2a1374f46905c68a4eb3694a130610adc62a
- name: github.com/gogo/protobuf
  version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
  subpackages:
  - proto
- name: github.com/golang/glog
  version: 44145f04b68cf362d9c4df2182967c2275eaefed
- name: github.com/inconshreveable/mousetrap
  version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/Microsoft/go-winio
  version: 7da180ee92d8bd8bb8c37fc560e673e6557c392f
- name: github.com/Nvveen/Gotty
  version: cd527374f1e5bff4938207604a14f2e38a9cf512
- name: github.com/opencontainers/go-digest
  version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
- name: github.com/opencontainers/image-spec
  version: 372ad780f63454fbbbbcc7cf80e5b90245c13e13
  subpackages:
  - specs-go
  - specs-go/v1
- name: github.com/opencontainers/runc
  version: 4d6e6720a7c885c37b4cb083c0d372dda3425120
  subpackages:
  - libcontainer/user
- name: github.com/pkg/errors
  version: a22138067af1c4942683050411a841ade67fe1eb
- name: github.com/sirupsen/logrus
  version: 89742aefa4b206dcf400792f3bd35b542998eb3b
- name: github.com/spf13/cobra
  version: 7c674d9e72017ed25f6d2b5e497a1368086b6a6f
- name: github.com/spf13/pflag
  version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
- name: golang.org/x/crypto
  version: 81e90905daefcd6fd217b62423c0908922eadb30
  subpackages:
  - ssh/terminal
- name: golang.org/x/net
  version: 1c05540f6879653db88113bc4a2b70aec4bd491f
  subpackages:
  - context
  - context/ctxhttp
  - proxy
- name: golang.org/x/sys
  version: 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce
  subpackages:
  - unix
  - windows
testImports: []
103 glide.yaml Normal file
@@ -0,0 +1,103 @@
package: github.com/openshift/source-to-image
import:
- package: github.com/Azure/go-ansiterm
  version: 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
  subpackages:
  - winterm
- package: github.com/Microsoft/go-winio
  version: ^0.4.5
- package: github.com/Nvveen/Gotty
  version: cd527374f1e5bff4938207604a14f2e38a9cf512
- package: github.com/docker/distribution
  version: edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
  subpackages:
  - digestset
  - reference
- package: github.com/docker/docker
  repo: git@github.com:openshift/moby-moby
  version: 4f3616fb1c112e206b88cb7a9922bf49067a7756
  subpackages:
  - api
  - api/types
  - api/types/blkiodev
  - api/types/container
  - api/types/events
  - api/types/filters
  - api/types/image
  - api/types/mount
  - api/types/network
  - api/types/registry
  - api/types/strslice
  - api/types/swarm
  - api/types/swarm/runtime
  - api/types/time
  - api/types/versions
  - api/types/volume
  - builder/dockerfile/command
  - builder/dockerfile/parser
  - cli/config
  - client
  - pkg/homedir
  - pkg/idtools
  - pkg/ioutils
  - pkg/jsonlog
  - pkg/jsonmessage
  - pkg/longpath
  - pkg/mount
  - pkg/stdcopy
  - pkg/system
  - pkg/term
  - pkg/term/windows
  - pkg/tlsconfig
- package: github.com/docker/go-connections
  version: ^0.3.0
  subpackages:
  - nat
  - sockets
  - tlsconfig
- package: github.com/docker/go-units
  version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
- package: github.com/docker/libtrust
  version: 9cbd2a1374f46905c68a4eb3694a130610adc62a
- package: github.com/gogo/protobuf
  version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
  subpackages:
  - proto
- package: github.com/golang/glog
  version: 44145f04b68cf362d9c4df2182967c2275eaefed
- package: github.com/inconshreveable/mousetrap
  version: ^1.0.0
- package: github.com/opencontainers/go-digest
  version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
- package: github.com/opencontainers/image-spec
  version: 372ad780f63454fbbbbcc7cf80e5b90245c13e13
  subpackages:
  - specs-go
  - specs-go/v1
- package: github.com/opencontainers/runc
  version: 4d6e6720a7c885c37b4cb083c0d372dda3425120
  subpackages:
  - libcontainer/user
- package: github.com/pkg/errors
  version: a22138067af1c4942683050411a841ade67fe1eb
- package: github.com/sirupsen/logrus
  version: 89742aefa4b206dcf400792f3bd35b542998eb3b
- package: github.com/spf13/cobra
  version: 7c674d9e72017ed25f6d2b5e497a1368086b6a6f
- package: github.com/spf13/pflag
  version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
- package: golang.org/x/crypto
  version: 81e90905daefcd6fd217b62423c0908922eadb30
  subpackages:
  - ssh/terminal
- package: golang.org/x/net
  version: 1c05540f6879653db88113bc4a2b70aec4bd491f
  subpackages:
  - context
  - context/ctxhttp
  - proxy
- package: golang.org/x/sys
  version: 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce
  subpackages:
  - unix
  - windows
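The manifest above pins every dependency to the same revisions the deleted Godeps.json recorded, with github.com/docker/docker redirected to the openshift/moby-moby fork via the repo field. A quick way to confirm the pins are mutually compatible is a throwaway program compiled against the vendor/ tree glide populates; the following is a sketch, not part of this change, and assumes the pinned docker revision exposes client.NewEnvClient and Client.ServerVersion, as docker clients of that era did.

package main

// Hypothetical smoke test: exercises the pinned github.com/docker/docker/client
// and golang.org/x/net/context packages from the glide-managed vendor/ tree.

import (
	"fmt"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	// NewEnvClient reads DOCKER_HOST and friends from the environment.
	cli, err := client.NewEnvClient()
	if err != nil {
		fmt.Println("client init failed:", err)
		return
	}
	// ServerVersion round-trips to the daemon, so a successful call shows
	// the client, its types packages, and the x/net context shim all resolve.
	v, err := cli.ServerVersion(context.Background())
	if err != nil {
		fmt.Println("version call failed:", err)
		return
	}
	fmt.Println("talking to docker", v.Version)
}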
27 vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
344 vendor/github.com/Microsoft/go-winio/archive/tar/common.go generated vendored Normal file
@@ -0,0 +1,344 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package tar implements access to tar archives.
// It aims to cover most of the variations, including those produced
// by GNU and BSD tars.
//
// References:
//   http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
//   http://www.gnu.org/software/tar/manual/html_node/Standard.html
//   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
package tar

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"path"
	"time"
)

const (
	blockSize = 512

	// Types
	TypeReg           = '0'    // regular file
	TypeRegA          = '\x00' // regular file
	TypeLink          = '1'    // hard link
	TypeSymlink       = '2'    // symbolic link
	TypeChar          = '3'    // character device node
	TypeBlock         = '4'    // block device node
	TypeDir           = '5'    // directory
	TypeFifo          = '6'    // fifo node
	TypeCont          = '7'    // reserved
	TypeXHeader       = 'x'    // extended header
	TypeXGlobalHeader = 'g'    // global extended header
	TypeGNULongName   = 'L'    // Next file has a long name
	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
	TypeGNUSparse     = 'S'    // sparse file
)

// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
	Name         string            // name of header file entry
	Mode         int64             // permission and mode bits
	Uid          int               // user id of owner
	Gid          int               // group id of owner
	Size         int64             // length in bytes
	ModTime      time.Time         // modified time
	Typeflag     byte              // type of header entry
	Linkname     string            // target name of link
	Uname        string            // user name of owner
	Gname        string            // group name of owner
	Devmajor     int64             // major number of character or block device
	Devminor     int64             // minor number of character or block device
	AccessTime   time.Time         // access time
	ChangeTime   time.Time         // status change time
	CreationTime time.Time         // creation time
	Xattrs       map[string]string
	Winheaders   map[string]string
}

// File name constants from the tar spec.
const (
	fileNameSize       = 100 // Maximum number of bytes in a standard tar name.
	fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)

// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
	return headerFileInfo{h}
}

// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
	h *Header
}

func (fi headerFileInfo) Size() int64        { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool        { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{}   { return fi.h }

// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
	if fi.IsDir() {
		return path.Base(path.Clean(fi.h.Name))
	}
	return path.Base(fi.h.Name)
}

// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
	// Set file permission bits.
	mode = os.FileMode(fi.h.Mode).Perm()

	// Set setuid, setgid and sticky bits.
	if fi.h.Mode&c_ISUID != 0 {
		// setuid
		mode |= os.ModeSetuid
	}
	if fi.h.Mode&c_ISGID != 0 {
		// setgid
		mode |= os.ModeSetgid
	}
	if fi.h.Mode&c_ISVTX != 0 {
		// sticky
		mode |= os.ModeSticky
	}

	// Set file mode bits.
	// clear perm, setuid, setgid and sticky bits.
	m := os.FileMode(fi.h.Mode) &^ 07777
	if m == c_ISDIR {
		// directory
		mode |= os.ModeDir
	}
	if m == c_ISFIFO {
		// named pipe (FIFO)
		mode |= os.ModeNamedPipe
	}
	if m == c_ISLNK {
		// symbolic link
		mode |= os.ModeSymlink
	}
	if m == c_ISBLK {
		// device file
		mode |= os.ModeDevice
	}
	if m == c_ISCHR {
		// Unix character device
		mode |= os.ModeDevice
		mode |= os.ModeCharDevice
	}
	if m == c_ISSOCK {
		// Unix domain socket
		mode |= os.ModeSocket
	}

	switch fi.h.Typeflag {
	case TypeSymlink:
		// symbolic link
		mode |= os.ModeSymlink
	case TypeChar:
		// character device node
		mode |= os.ModeDevice
		mode |= os.ModeCharDevice
	case TypeBlock:
		// block device node
		mode |= os.ModeDevice
	case TypeDir:
		// directory
		mode |= os.ModeDir
	case TypeFifo:
		// fifo node
		mode |= os.ModeNamedPipe
	}

	return mode
}

// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error

// Mode constants from the tar spec.
const (
	c_ISUID  = 04000   // Set uid
	c_ISGID  = 02000   // Set gid
	c_ISVTX  = 01000   // Save text (sticky bit)
	c_ISDIR  = 040000  // Directory
	c_ISFIFO = 010000  // FIFO
	c_ISREG  = 0100000 // Regular file
	c_ISLNK  = 0120000 // Symbolic link
	c_ISBLK  = 060000  // Block special file
	c_ISCHR  = 020000  // Character special file
	c_ISSOCK = 0140000 // Socket
)

// Keywords for the PAX Extended Header
const (
	paxAtime        = "atime"
	paxCharset      = "charset"
	paxComment      = "comment"
	paxCtime        = "ctime" // please note that ctime is not a valid pax header.
	paxCreationTime = "LIBARCHIVE.creationtime"
	paxGid          = "gid"
	paxGname        = "gname"
	paxLinkpath     = "linkpath"
	paxMtime        = "mtime"
	paxPath         = "path"
	paxSize         = "size"
	paxUid          = "uid"
	paxUname        = "uname"
	paxXattr        = "SCHILY.xattr."
	paxWindows      = "MSWINDOWS."
	paxNone         = ""
)

// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
	if fi == nil {
		return nil, errors.New("tar: FileInfo is nil")
	}
	fm := fi.Mode()
	h := &Header{
		Name:    fi.Name(),
		ModTime: fi.ModTime(),
		Mode:    int64(fm.Perm()), // or'd with c_IS* constants later
	}
	switch {
	case fm.IsRegular():
		h.Mode |= c_ISREG
		h.Typeflag = TypeReg
		h.Size = fi.Size()
	case fi.IsDir():
		h.Typeflag = TypeDir
		h.Mode |= c_ISDIR
		h.Name += "/"
	case fm&os.ModeSymlink != 0:
		h.Typeflag = TypeSymlink
		h.Mode |= c_ISLNK
		h.Linkname = link
	case fm&os.ModeDevice != 0:
		if fm&os.ModeCharDevice != 0 {
			h.Mode |= c_ISCHR
			h.Typeflag = TypeChar
		} else {
			h.Mode |= c_ISBLK
			h.Typeflag = TypeBlock
		}
	case fm&os.ModeNamedPipe != 0:
		h.Typeflag = TypeFifo
		h.Mode |= c_ISFIFO
	case fm&os.ModeSocket != 0:
		h.Mode |= c_ISSOCK
	default:
		return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
	}
	if fm&os.ModeSetuid != 0 {
		h.Mode |= c_ISUID
	}
	if fm&os.ModeSetgid != 0 {
		h.Mode |= c_ISGID
	}
	if fm&os.ModeSticky != 0 {
		h.Mode |= c_ISVTX
	}
	// If possible, populate additional fields from OS-specific
	// FileInfo fields.
	if sys, ok := fi.Sys().(*Header); ok {
		// This FileInfo came from a Header (not the OS). Use the
		// original Header to populate all remaining fields.
		h.Uid = sys.Uid
		h.Gid = sys.Gid
		h.Uname = sys.Uname
		h.Gname = sys.Gname
		h.AccessTime = sys.AccessTime
		h.ChangeTime = sys.ChangeTime
		if sys.Xattrs != nil {
			h.Xattrs = make(map[string]string)
			for k, v := range sys.Xattrs {
				h.Xattrs[k] = v
			}
		}
		if sys.Typeflag == TypeLink {
			// hard link
			h.Typeflag = TypeLink
			h.Size = 0
			h.Linkname = sys.Linkname
		}
	}
	if sysStat != nil {
		return h, sysStat(fi, h)
	}
	return h, nil
}

var zeroBlock = make([]byte, blockSize)

// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both.
func checksum(header []byte) (unsigned int64, signed int64) {
	for i := 0; i < len(header); i++ {
		if i == 148 {
			// The chksum field (header[148:156]) is special: it should be treated as space bytes.
			unsigned += ' ' * 8
			signed += ' ' * 8
			i += 7
			continue
		}
		unsigned += int64(header[i])
		signed += int64(int8(header[i]))
	}
	return
}

type slicer []byte

func (sp *slicer) next(n int) (b []byte) {
	s := *sp
	b, *sp = s[0:n], s[n:]
	return
}

func isASCII(s string) bool {
	for _, c := range s {
		if c >= 0x80 {
			return false
		}
	}
	return true
}

func toASCII(s string) string {
	if isASCII(s) {
		return s
	}
	var buf bytes.Buffer
	for _, c := range s {
		if c < 0x80 {
			buf.WriteByte(byte(c))
		}
	}
	return buf.String()
}

// isHeaderOnlyType checks if the given type flag is of the type that has no
// data section even if a size is specified.
func isHeaderOnlyType(flag byte) bool {
	switch flag {
	case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
		return true
	default:
		return false
	}
}
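The interaction between the unexported c_IS* constants, headerFileInfo.Mode, and FileInfoHeader above is easiest to see in a round trip. The following is a sketch, not part of the vendored file, assuming the package is importable as github.com/Microsoft/go-winio/archive/tar; 040000 is the value of the unexported c_ISDIR constant.

package main

import (
	"fmt"
	"os"

	"github.com/Microsoft/go-winio/archive/tar"
)

func main() {
	// A directory entry as a tar header: permission bits plus the tar-spec
	// directory bit (c_ISDIR = 040000, unexported, so spelled as a literal).
	h := &tar.Header{Name: "dir/", Mode: 0755 | 040000, Typeflag: tar.TypeDir}

	fi := h.FileInfo()
	fmt.Println(fi.IsDir())              // true: Mode maps c_ISDIR to os.ModeDir
	fmt.Println(fi.Mode() & os.ModePerm) // -rwxr-xr-x: permission bits pass through

	// FileInfoHeader recognizes the *Header behind fi.Sys() and round-trips it.
	h2, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		panic(err)
	}
	fmt.Println(h2.Typeflag == tar.TypeDir) // true
}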
80 vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go generated vendored Normal file
@@ -0,0 +1,80 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar_test

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
	"os"
)

func Example() {
	// Create a buffer to write our archive to.
	buf := new(bytes.Buffer)

	// Create a new tar archive.
	tw := tar.NewWriter(buf)

	// Add some files to the archive.
	var files = []struct {
		Name, Body string
	}{
		{"readme.txt", "This archive contains some text files."},
		{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
		{"todo.txt", "Get animal handling license."},
	}
	for _, file := range files {
		hdr := &tar.Header{
			Name: file.Name,
			Mode: 0600,
			Size: int64(len(file.Body)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			log.Fatalln(err)
		}
		if _, err := tw.Write([]byte(file.Body)); err != nil {
			log.Fatalln(err)
		}
	}
	// Make sure to check the error on Close.
	if err := tw.Close(); err != nil {
		log.Fatalln(err)
	}

	// Open the tar archive for reading.
	r := bytes.NewReader(buf.Bytes())
	tr := tar.NewReader(r)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Printf("Contents of %s:\n", hdr.Name)
		if _, err := io.Copy(os.Stdout, tr); err != nil {
			log.Fatalln(err)
		}
		fmt.Println()
	}

	// Output:
	// Contents of readme.txt:
	// This archive contains some text files.
	// Contents of gopher.txt:
	// Gopher names:
	// George
	// Geoffrey
	// Gonzo
	// Contents of todo.txt:
	// Get animal handling license.
}
1002 vendor/github.com/Microsoft/go-winio/archive/tar/reader.go generated vendored Normal file
File diff suppressed because it is too large
1125 vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go generated vendored Normal file
File diff suppressed because it is too large
20 vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go generated vendored Normal file
@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux dragonfly openbsd solaris

package tar

import (
	"syscall"
	"time"
)

func statAtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Atim.Unix())
}

func statCtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Ctim.Unix())
}
20 vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go generated vendored Normal file
@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin freebsd netbsd

package tar

import (
	"syscall"
	"time"
)

func statAtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Atimespec.Unix())
}

func statCtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Ctimespec.Unix())
}
32 vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go generated vendored Normal file
@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin dragonfly freebsd openbsd netbsd solaris

package tar

import (
	"os"
	"syscall"
)

func init() {
	sysStat = statUnix
}

func statUnix(fi os.FileInfo, h *Header) error {
	sys, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return nil
	}
	h.Uid = int(sys.Uid)
	h.Gid = int(sys.Gid)
	// TODO(bradfitz): populate username & group. os/user
	// doesn't cache LookupId lookups, and lacks group
	// lookup functions.
	h.AccessTime = statAtime(sys)
	h.ChangeTime = statCtime(sys)
	// TODO(bradfitz): major/minor device numbers?
	return nil
}
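stat_unix.go wires platform-specific stat support into FileInfoHeader through the nil-able sysStat hook declared in common.go: an init function assigns it only in files whose build tags match, so on other platforms the hook stays nil and FileInfoHeader simply skips it. A self-contained sketch of that hook pattern, with illustrative names not taken from the vendored code:

package main

import "fmt"

// enrich mirrors the sysStat hook: a nil-able package-level function that
// platform-specific files assign in init and callers test before use.
var enrich func(name string) string

// In the real package this init lives behind build tags (see stat_unix.go
// above), so the hook stays nil on platforms the tags exclude.
func init() {
	enrich = func(name string) string { return name + " (stat'd)" }
}

func describe(name string) string {
	if enrich != nil {
		return enrich(name)
	}
	return name
}

func main() {
	fmt.Println(describe("small.txt"))
}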
325 vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go generated vendored Normal file
@@ -0,0 +1,325 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar

import (
	"bytes"
	"io/ioutil"
	"os"
	"path"
	"reflect"
	"strings"
	"testing"
	"time"
)

func TestFileInfoHeader(t *testing.T) {
	fi, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	h, err := FileInfoHeader(fi, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	if g, e := h.Name, "small.txt"; g != e {
		t.Errorf("Name = %q; want %q", g, e)
	}
	if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
		t.Errorf("Mode = %#o; want %#o", g, e)
	}
	if g, e := h.Size, int64(5); g != e {
		t.Errorf("Size = %v; want %v", g, e)
	}
	if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
		t.Errorf("ModTime = %v; want %v", g, e)
	}
	// FileInfoHeader should error when passing nil FileInfo
	if _, err := FileInfoHeader(nil, ""); err == nil {
		t.Fatalf("Expected error when passing nil to FileInfoHeader")
	}
}

func TestFileInfoHeaderDir(t *testing.T) {
	fi, err := os.Stat("testdata")
	if err != nil {
		t.Fatal(err)
	}
	h, err := FileInfoHeader(fi, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	if g, e := h.Name, "testdata/"; g != e {
		t.Errorf("Name = %q; want %q", g, e)
	}
	// Ignoring c_ISGID for golang.org/issue/4867
	if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
		t.Errorf("Mode = %#o; want %#o", g, e)
	}
	if g, e := h.Size, int64(0); g != e {
		t.Errorf("Size = %v; want %v", g, e)
	}
	if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
		t.Errorf("ModTime = %v; want %v", g, e)
	}
}

func TestFileInfoHeaderSymlink(t *testing.T) {
	h, err := FileInfoHeader(symlink{}, "some-target")
	if err != nil {
		t.Fatal(err)
	}
	if g, e := h.Name, "some-symlink"; g != e {
		t.Errorf("Name = %q; want %q", g, e)
	}
	if g, e := h.Linkname, "some-target"; g != e {
		t.Errorf("Linkname = %q; want %q", g, e)
	}
}

type symlink struct{}

func (symlink) Name() string       { return "some-symlink" }
func (symlink) Size() int64        { return 0 }
func (symlink) Mode() os.FileMode  { return os.ModeSymlink }
func (symlink) ModTime() time.Time { return time.Time{} }
func (symlink) IsDir() bool        { return false }
func (symlink) Sys() interface{}   { return nil }

func TestRoundTrip(t *testing.T) {
	data := []byte("some file contents")

	var b bytes.Buffer
	tw := NewWriter(&b)
	hdr := &Header{
		Name:    "file.txt",
		Uid:     1 << 21, // too big for 8 octal digits
		Size:    int64(len(data)),
		ModTime: time.Now(),
	}
	// tar only supports second precision.
	hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
	if err := tw.WriteHeader(hdr); err != nil {
		t.Fatalf("tw.WriteHeader: %v", err)
	}
	if _, err := tw.Write(data); err != nil {
		t.Fatalf("tw.Write: %v", err)
	}
	if err := tw.Close(); err != nil {
		t.Fatalf("tw.Close: %v", err)
	}

	// Read it back.
	tr := NewReader(&b)
	rHdr, err := tr.Next()
	if err != nil {
		t.Fatalf("tr.Next: %v", err)
	}
	if !reflect.DeepEqual(rHdr, hdr) {
		t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
	}
	rData, err := ioutil.ReadAll(tr)
	if err != nil {
		t.Fatalf("Read: %v", err)
	}
	if !bytes.Equal(rData, data) {
		t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
	}
}

type headerRoundTripTest struct {
	h  *Header
	fm os.FileMode
}

func TestHeaderRoundTrip(t *testing.T) {
	golden := []headerRoundTripTest{
		// regular file.
		{
			h: &Header{
				Name:     "test.txt",
				Mode:     0644 | c_ISREG,
				Size:     12,
				ModTime:  time.Unix(1360600916, 0),
				Typeflag: TypeReg,
			},
			fm: 0644,
		},
		// symbolic link.
		{
			h: &Header{
				Name:     "link.txt",
				Mode:     0777 | c_ISLNK,
				Size:     0,
				ModTime:  time.Unix(1360600852, 0),
				Typeflag: TypeSymlink,
			},
			fm: 0777 | os.ModeSymlink,
		},
		// character device node.
		{
			h: &Header{
				Name:     "dev/null",
				Mode:     0666 | c_ISCHR,
				Size:     0,
				ModTime:  time.Unix(1360578951, 0),
				Typeflag: TypeChar,
			},
			fm: 0666 | os.ModeDevice | os.ModeCharDevice,
		},
		// block device node.
		{
			h: &Header{
				Name:     "dev/sda",
				Mode:     0660 | c_ISBLK,
				Size:     0,
				ModTime:  time.Unix(1360578954, 0),
				Typeflag: TypeBlock,
			},
			fm: 0660 | os.ModeDevice,
		},
		// directory.
		{
			h: &Header{
				Name:     "dir/",
				Mode:     0755 | c_ISDIR,
				Size:     0,
				ModTime:  time.Unix(1360601116, 0),
				Typeflag: TypeDir,
			},
			fm: 0755 | os.ModeDir,
		},
		// fifo node.
		{
			h: &Header{
				Name:     "dev/initctl",
				Mode:     0600 | c_ISFIFO,
				Size:     0,
				ModTime:  time.Unix(1360578949, 0),
				Typeflag: TypeFifo,
			},
			fm: 0600 | os.ModeNamedPipe,
		},
		// setuid.
		{
			h: &Header{
				Name:     "bin/su",
				Mode:     0755 | c_ISREG | c_ISUID,
				Size:     23232,
				ModTime:  time.Unix(1355405093, 0),
				Typeflag: TypeReg,
			},
			fm: 0755 | os.ModeSetuid,
		},
		// setguid.
		{
			h: &Header{
				Name:     "group.txt",
				Mode:     0750 | c_ISREG | c_ISGID,
				Size:     0,
				ModTime:  time.Unix(1360602346, 0),
				Typeflag: TypeReg,
			},
			fm: 0750 | os.ModeSetgid,
		},
		// sticky.
		{
			h: &Header{
				Name:     "sticky.txt",
				Mode:     0600 | c_ISREG | c_ISVTX,
				Size:     7,
				ModTime:  time.Unix(1360602540, 0),
				Typeflag: TypeReg,
			},
			fm: 0600 | os.ModeSticky,
		},
		// hard link.
		{
			h: &Header{
				Name:     "hard.txt",
				Mode:     0644 | c_ISREG,
				Size:     0,
				Linkname: "file.txt",
				ModTime:  time.Unix(1360600916, 0),
				Typeflag: TypeLink,
			},
			fm: 0644,
		},
		// More information.
		{
			h: &Header{
				Name:     "info.txt",
				Mode:     0600 | c_ISREG,
				Size:     0,
				Uid:      1000,
				Gid:      1000,
				ModTime:  time.Unix(1360602540, 0),
				Uname:    "slartibartfast",
				Gname:    "users",
				Typeflag: TypeReg,
			},
			fm: 0600,
		},
	}

	for i, g := range golden {
		fi := g.h.FileInfo()
		h2, err := FileInfoHeader(fi, "")
		if err != nil {
			t.Error(err)
			continue
		}
		if strings.Contains(fi.Name(), "/") {
			t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
		}
		name := path.Base(g.h.Name)
		if fi.IsDir() {
			name += "/"
		}
		if got, want := h2.Name, name; got != want {
			t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
		}
		if got, want := h2.Size, g.h.Size; got != want {
			t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
		}
		if got, want := h2.Uid, g.h.Uid; got != want {
			t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
		}
		if got, want := h2.Gid, g.h.Gid; got != want {
			t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
		}
		if got, want := h2.Uname, g.h.Uname; got != want {
			t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
		}
		if got, want := h2.Gname, g.h.Gname; got != want {
			t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
		}
		if got, want := h2.Linkname, g.h.Linkname; got != want {
			t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
		}
		if got, want := h2.Typeflag, g.h.Typeflag; got != want {
			t.Logf("%#v %#v", g.h, fi.Sys())
			t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
		}
		if got, want := h2.Mode, g.h.Mode; got != want {
			t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
		}
		if got, want := fi.Mode(), g.fm; got != want {
			t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
		}
		if got, want := h2.AccessTime, g.h.AccessTime; got != want {
			t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
		}
		if got, want := h2.ChangeTime, g.h.ChangeTime; got != want {
			t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
		}
		if got, want := h2.ModTime, g.h.ModTime; got != want {
			t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
		}
		if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
			t.Errorf("i=%d: Sys didn't return original *Header", i)
		}
	}
}
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/gnu-multi-hdrs.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/gnu.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/hardlink.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/hdr-only.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue10968.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue11169.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/issue12435.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/neg-size.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/nil-uid.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax-multi-hdrs.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax-path-hdr.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/pax.tar generated vendored Normal file
Binary file not shown.
1 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small.txt generated vendored Normal file
@@ -0,0 +1 @@
Kilts
1 vendor/github.com/Microsoft/go-winio/archive/tar/testdata/small2.txt generated vendored Normal file
@@ -0,0 +1 @@
Google.com
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/sparse-formats.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/star.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/ustar-file-reg.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/ustar.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/v7.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer-big-long.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer-big.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/writer.tar generated vendored Normal file
Binary file not shown.
BIN vendor/github.com/Microsoft/go-winio/archive/tar/testdata/xattrs.tar generated vendored Normal file
Binary file not shown.
444 vendor/github.com/Microsoft/go-winio/archive/tar/writer.go generated vendored Normal file
@@ -0,0 +1,444 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
// TODO(dsymonds):
|
||||
// - catch more errors (no first header, etc.)
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrWriteTooLong = errors.New("archive/tar: write too long")
|
||||
ErrFieldTooLong = errors.New("archive/tar: header field too long")
|
||||
ErrWriteAfterClose = errors.New("archive/tar: write after close")
|
||||
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
|
||||
)
|
||||
|
||||
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
|
||||
// A tar archive consists of a sequence of files.
|
||||
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
|
||||
// writing at most hdr.Size bytes in total.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
err error
|
||||
nb int64 // number of unwritten bytes for current file entry
|
||||
pad int64 // amount of padding to write after current file entry
|
||||
closed bool
|
||||
usedBinary bool // whether the binary numeric field extension was used
|
||||
preferPax bool // use pax header instead of binary numeric header
|
||||
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
|
||||
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
|
||||
}
|
||||
|
||||
type formatter struct {
|
||||
err error // Last error seen
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer writing to w.
|
||||
func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
|
||||
|
||||
// Flush finishes writing the current file (optional).
|
||||
func (tw *Writer) Flush() error {
|
||||
if tw.nb > 0 {
|
||||
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
n := tw.nb + tw.pad
|
||||
for n > 0 && tw.err == nil {
|
||||
nr := n
|
||||
if nr > blockSize {
|
||||
nr = blockSize
|
||||
}
|
||||
var nw int
|
||||
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
|
||||
n -= int64(nw)
|
||||
}
|
||||
tw.nb = 0
|
||||
tw.pad = 0
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// Write s into b, terminating it with a NUL if there is room.
|
||||
func (f *formatter) formatString(b []byte, s string) {
|
||||
if len(s) > len(b) {
|
||||
f.err = ErrFieldTooLong
|
||||
return
|
||||
}
|
||||
ascii := toASCII(s)
|
||||
copy(b, ascii)
|
||||
if len(ascii) < len(b) {
|
||||
b[len(ascii)] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Encode x as an octal ASCII string and write it into b with leading zeros.
|
||||
func (f *formatter) formatOctal(b []byte, x int64) {
|
||||
s := strconv.FormatInt(x, 8)
|
||||
// leading zeros, but leave room for a NUL.
|
||||
for len(s)+1 < len(b) {
|
||||
s = "0" + s
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
|
||||
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
|
||||
// encoding. Unlike octal encoding, base-256 encoding does not require that the
|
||||
// string ends with a NUL character. Thus, all n bytes are available for output.
|
||||
//
|
||||
// If operating in binary mode, this assumes strict GNU binary mode; which means
|
||||
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
|
||||
// equivalent to the sign bit in two's complement form.
|
||||
func fitsInBase256(n int, x int64) bool {
|
||||
var binBits = uint(n-1) * 8
|
||||
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
|
||||
}
|
||||
|
||||
// Write x into b, as binary (GNUtar/star extension).
|
||||
func (f *formatter) formatNumeric(b []byte, x int64) {
|
||||
if fitsInBase256(len(b), x) {
|
||||
for i := len(b) - 1; i >= 0; i-- {
|
||||
b[i] = byte(x)
|
||||
x >>= 8
|
||||
}
|
||||
b[0] |= 0x80 // Highest bit indicates binary format
|
||||
return
|
||||
}
|
||||
|
||||
f.formatOctal(b, 0) // Last resort, just write zero
|
||||
f.err = ErrFieldTooLong
|
||||
}
|
||||
|
||||
var (
|
||||
minTime = time.Unix(0, 0)
|
||||
// There is room for 11 octal digits (33 bits) of mtime.
|
||||
maxTime = minTime.Add((1<<33 - 1) * time.Second)
|
||||
)
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
	return tw.writeHeader(hdr, true)
}

// writeHeader writes hdr and prepares to accept the file's contents.
// It calls Flush if it is not the first header, and returns
// ErrWriteAfterClose if called after a Close. The allowPax argument is used
// internally by writePAXHeader so that it can suppress writing a second PAX
// header for the PAX header entry itself.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
	if tw.closed {
		return ErrWriteAfterClose
	}
	if tw.err == nil {
		tw.Flush()
	}
	if tw.err != nil {
		return tw.err
	}

	// a map to hold pax header records, if any are needed
	paxHeaders := make(map[string]string)

	// TODO(shanemhansen): we might want to use PAX headers for
	// subsecond time resolution, but for now let's just capture
	// too-long fields or non-ASCII characters

	var f formatter
	var header []byte

	// We need to select which scratch buffer to use carefully,
	// since this method is called recursively to write PAX headers.
	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
	// already being used by the non-recursive call, so we must use paxHdrBuff.
	header = tw.hdrBuff[:]
	if !allowPax {
		header = tw.paxHdrBuff[:]
	}
	copy(header, zeroBlock)
	s := slicer(header)

	// Wrappers around formatter that automatically set paxHeaders if the
	// argument extends beyond the capacity of the input byte slice.
	var formatString = func(b []byte, s string, paxKeyword string) {
		needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
		if needsPaxHeader {
			paxHeaders[paxKeyword] = s
			return
		}
		f.formatString(b, s)
	}
	var formatNumeric = func(b []byte, x int64, paxKeyword string) {
		// Try octal first.
		s := strconv.FormatInt(x, 8)
		if len(s) < len(b) {
			f.formatOctal(b, x)
			return
		}

		// If it is too long for octal, and PAX is preferred, use a PAX header.
		if paxKeyword != paxNone && tw.preferPax {
			f.formatOctal(b, 0)
			s := strconv.FormatInt(x, 10)
			paxHeaders[paxKeyword] = s
			return
		}

		tw.usedBinary = true
		f.formatNumeric(b, x)
	}
	var formatTime = func(b []byte, t time.Time, paxKeyword string) {
		var unixTime int64
		if !t.Before(minTime) && !t.After(maxTime) {
			unixTime = t.Unix()
		}
		formatNumeric(b, unixTime, paxNone)

		// Write a PAX header if the time didn't fit precisely.
		if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
			paxHeaders[paxKeyword] = formatPAXTime(t)
		}
	}

	// Keep a reference to the filename field so we can overwrite it later if
	// we detect that we can use USTAR long names instead of PAX.
	pathHeaderBytes := s.next(fileNameSize)

	formatString(pathHeaderBytes, hdr.Name, paxPath)

	f.formatOctal(s.next(8), hdr.Mode)               // 100:108
	formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
	formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
	formatNumeric(s.next(12), hdr.Size, paxSize)     // 124:136
	formatTime(s.next(12), hdr.ModTime, paxMtime)    // 136:148
	s.next(8)                                        // chksum (148:156)
	s.next(1)[0] = hdr.Typeflag                      // 156:157

	formatString(s.next(100), hdr.Linkname, paxLinkpath)

	copy(s.next(8), []byte("ustar\x0000"))          // 257:265
	formatString(s.next(32), hdr.Uname, paxUname)   // 265:297
	formatString(s.next(32), hdr.Gname, paxGname)   // 297:329
	formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
	formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345

	// Keep a reference to the prefix field so we can overwrite it later if
	// we detect that we can use USTAR long names instead of PAX.
	prefixHeaderBytes := s.next(155)
	formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix

	// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
	if tw.usedBinary {
		copy(header[257:265], []byte("ustar  \x00"))
	}

	_, paxPathUsed := paxHeaders[paxPath]
	// try to use a ustar header when only the name is too long
	if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
		prefix, suffix, ok := splitUSTARPath(hdr.Name)
		if ok {
			// Since we can encode in USTAR format, disable PAX header.
			delete(paxHeaders, paxPath)

			// Update the path fields
			formatString(pathHeaderBytes, suffix, paxNone)
			formatString(prefixHeaderBytes, prefix, paxNone)
		}
	}

	// The chksum field is terminated by a NUL and a space.
	// This is different from the other octal fields.
	chksum, _ := checksum(header)
	f.formatOctal(header[148:155], chksum) // Never fails
	header[155] = ' '

	// Check if there were any formatting errors.
	if f.err != nil {
		tw.err = f.err
		return tw.err
	}

	if allowPax {
		if !hdr.AccessTime.IsZero() {
			paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
		}
		if !hdr.ChangeTime.IsZero() {
			paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
		}
		if !hdr.CreationTime.IsZero() {
			paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
		}
		for k, v := range hdr.Xattrs {
			paxHeaders[paxXattr+k] = v
		}
		for k, v := range hdr.Winheaders {
			paxHeaders[paxWindows+k] = v
		}
	}

	if len(paxHeaders) > 0 {
		if !allowPax {
			return errInvalidHeader
		}
		if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
			return err
		}
	}
	tw.nb = int64(hdr.Size)
	tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize

	_, tw.err = tw.w.Write(header)
	return tw.err
}

func formatPAXTime(t time.Time) string {
	sec := t.Unix()
	nsec := t.Nanosecond()
	s := strconv.FormatInt(sec, 10)
	if nsec != 0 {
		s = fmt.Sprintf("%s.%09d", s, nsec)
	}
	return s
}

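// examplePAXTime is an illustrative sketch, not part of the original file;
// the function name is hypothetical. It shows the two output shapes produced
// by formatPAXTime: plain seconds, and seconds with nine fractional digits
// when the time carries sub-second precision (values match the test vectors
// in writer_test.go).
func examplePAXTime() {
	t := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC)
	_ = formatPAXTime(t)          // "946724400"
	_ = formatPAXTime(t.Add(100)) // "946724400.000000100" (100ns later)
}
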
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
	length := len(name)
	if length <= fileNameSize || !isASCII(name) {
		return "", "", false
	} else if length > fileNamePrefixSize+1 {
		length = fileNamePrefixSize + 1
	} else if name[length-1] == '/' {
		length--
	}

	i := strings.LastIndex(name[:length], "/")
	nlen := len(name) - i - 1 // nlen is length of suffix
	plen := i                 // plen is length of prefix
	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
		return "", "", false
	}
	return name[:i], name[i+1:], true
}

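// exampleSplitUSTARPath is an illustrative sketch, not part of the original
// file; the function name is hypothetical. A path longer than the 100-byte
// name field can still be stored in USTAR form if it splits at a '/' into a
// prefix of at most 155 bytes and a suffix of at most 100 bytes.
func exampleSplitUSTARPath() {
	long := strings.Repeat("a", 100) + "/file.txt" // 109 bytes, too long for the name field
	prefix, suffix, ok := splitUSTARPath(long)
	_, _, _ = prefix, suffix, ok // ("aaa...a", "file.txt", true)
}
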
// writePAXHeader writes an extended PAX header entry to the archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
	// Prepare extended header
	ext := new(Header)
	ext.Typeflag = TypeXHeader
	// Setting ModTime is required for reader parsing to
	// succeed, and seems harmless enough.
	ext.ModTime = hdr.ModTime
	// The spec asks that we namespace our pseudo files
	// with the current pid. However, this results in differing outputs
	// for identical inputs. As such, the constant 0 is now used instead.
	// golang.org/issue/12358
	dir, file := path.Split(hdr.Name)
	fullName := path.Join(dir, "PaxHeaders.0", file)

	ascii := toASCII(fullName)
	if len(ascii) > 100 {
		ascii = ascii[:100]
	}
	ext.Name = ascii
	// Construct the body
	var buf bytes.Buffer

	// Keys are sorted before writing to the body to allow deterministic output.
	var keys []string
	for k := range paxHeaders {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
	}

	ext.Size = int64(buf.Len())
	if err := tw.writeHeader(ext, false); err != nil {
		return err
	}
	if _, err := tw.Write(buf.Bytes()); err != nil {
		return err
	}
	if err := tw.Flush(); err != nil {
		return err
	}
	return nil
}

// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
	const padding = 3 // Extra padding for ' ', '=', and '\n'
	size := len(k) + len(v) + padding
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s=%s\n", size, k, v)

	// Final adjustment if adding size field increased the record size.
	if len(record) != size {
		size = len(record)
		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
	}
	return record
}

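// examplePAXRecord is an illustrative sketch, not part of the original file;
// the function name is hypothetical. The leading length counts the entire
// record, including the length digits themselves, which is why the function
// above may need a second formatting pass. The expected output matches the
// TestFormatPAXRecord vectors later in this file.
func examplePAXRecord() {
	_ = formatPAXRecord("path", "/etc/hosts") // "19 path=/etc/hosts\n"
	// " path=/etc/hosts\n" is 17 bytes; prepending the two length digits
	// "19" makes the whole record exactly 19 bytes.
}
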
// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
	if tw.closed {
		err = ErrWriteAfterClose
		return
	}
	overwrite := false
	if int64(len(b)) > tw.nb {
		b = b[0:tw.nb]
		overwrite = true
	}
	n, err = tw.w.Write(b)
	tw.nb -= int64(n)
	if err == nil && overwrite {
		err = ErrWriteTooLong
		return
	}
	tw.err = err
	return
}

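// exampleWriteEntry is an illustrative sketch, not part of the original file;
// the function name is hypothetical. It shows the normal
// WriteHeader/Write/Close sequence; writing more than hdr.Size bytes would
// return ErrWriteTooLong.
func exampleWriteEntry(w io.Writer) error {
	tw := NewWriter(w)
	hdr := &Header{Name: "hello.txt", Mode: 0644, Size: 5}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		return err
	}
	// Close flushes padding and writes the two-zero-block trailer.
	return tw.Close()
}
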
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
	if tw.err != nil || tw.closed {
		return tw.err
	}
	tw.Flush()
	tw.closed = true
	if tw.err != nil {
		return tw.err
	}

	// trailer: two zero blocks
	for i := 0; i < 2; i++ {
		_, tw.err = tw.w.Write(zeroBlock)
		if tw.err != nil {
			break
		}
	}
	return tw.err
}
739
vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go
generated
vendored
Normal file
@@ -0,0 +1,739 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"reflect"
	"sort"
	"strings"
	"testing"
	"testing/iotest"
	"time"
)

type writerTestEntry struct {
	header   *Header
	contents string
}

type writerTest struct {
	file    string // filename of expected output
	entries []*writerTestEntry
}

var writerTests = []*writerTest{
	// The writer test file was produced with this command (tar (GNU tar) 1.26):
	//   ln -s small.txt link.txt
	//   tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
	{
		file: "testdata/writer.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     "small.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     5,
					ModTime:  time.Unix(1246508266, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				contents: "Kilts",
			},
			{
				header: &Header{
					Name:     "small2.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     11,
					ModTime:  time.Unix(1245217492, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				contents: "Google.com\n",
			},
			{
				header: &Header{
					Name:     "link.txt",
					Mode:     0777,
					Uid:      1000,
					Gid:      1000,
					Size:     0,
					ModTime:  time.Unix(1314603082, 0),
					Typeflag: '2',
					Linkname: "small.txt",
					Uname:    "strings",
					Gname:    "strings",
				},
				// no contents
			},
		},
	},
	// The truncated test file was produced using these commands:
	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
	{
		file: "testdata/writer-big.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     "tmp/16gig.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     16 << 30,
					ModTime:  time.Unix(1254699560, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				// fake contents
				contents: strings.Repeat("\x00", 4<<10),
			},
		},
	},
	// The truncated test file was produced using these commands:
	//   dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
	//   tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
	{
		file: "testdata/writer-big-long.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     strings.Repeat("longname/", 15) + "16gig.txt",
					Mode:     0644,
					Uid:      1000,
					Gid:      1000,
					Size:     16 << 30,
					ModTime:  time.Unix(1399583047, 0),
					Typeflag: '0',
					Uname:    "guillaume",
					Gname:    "guillaume",
				},
				// fake contents
				contents: strings.Repeat("\x00", 4<<10),
			},
		},
	},
	// This file was produced using gnu tar 1.17:
	//   gnutar -b 4 --format=ustar (longname/)*15 + file.txt
	{
		file: "testdata/ustar.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     strings.Repeat("longname/", 15) + "file.txt",
					Mode:     0644,
					Uid:      0765,
					Gid:      024,
					Size:     06,
					ModTime:  time.Unix(1360135598, 0),
					Typeflag: '0',
					Uname:    "shane",
					Gname:    "staff",
				},
				contents: "hello\n",
			},
		},
	},
	// This file was produced using gnu tar 1.26:
	//   echo "Slartibartfast" > file.txt
	//   ln file.txt hard.txt
	//   tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
	{
		file: "testdata/hardlink.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     "file.txt",
					Mode:     0644,
					Uid:      1000,
					Gid:      100,
					Size:     15,
					ModTime:  time.Unix(1425484303, 0),
					Typeflag: '0',
					Uname:    "vbatts",
					Gname:    "users",
				},
				contents: "Slartibartfast\n",
			},
			{
				header: &Header{
					Name:     "hard.txt",
					Mode:     0644,
					Uid:      1000,
					Gid:      100,
					Size:     0,
					ModTime:  time.Unix(1425484303, 0),
					Typeflag: '1',
					Linkname: "file.txt",
					Uname:    "vbatts",
					Gname:    "users",
				},
				// no contents
			},
		},
	},
}

// Render a byte slice as two-character hexadecimal strings, spaced for easy visual inspection.
func bytestr(offset int, b []byte) string {
	const rowLen = 32
	s := fmt.Sprintf("%04x ", offset)
	for _, ch := range b {
		switch {
		case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
			s += fmt.Sprintf("  %c", ch)
		default:
			s += fmt.Sprintf(" %02x", ch)
		}
	}
	return s
}

// Render a pseudo-diff between two blocks of bytes.
func bytediff(a []byte, b []byte) string {
	const rowLen = 32
	s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
	for offset := 0; len(a)+len(b) > 0; offset += rowLen {
		na, nb := rowLen, rowLen
		if na > len(a) {
			na = len(a)
		}
		if nb > len(b) {
			nb = len(b)
		}
		sa := bytestr(offset, a[0:na])
		sb := bytestr(offset, b[0:nb])
		if sa != sb {
			s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
		}
		a = a[na:]
		b = b[nb:]
	}
	return s
}

func TestWriter(t *testing.T) {
testLoop:
	for i, test := range writerTests {
		expected, err := ioutil.ReadFile(test.file)
		if err != nil {
			t.Errorf("test %d: Unexpected error: %v", i, err)
			continue
		}

		buf := new(bytes.Buffer)
		tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
		big := false
		for j, entry := range test.entries {
			big = big || entry.header.Size > 1<<10
			if err := tw.WriteHeader(entry.header); err != nil {
				t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
				continue testLoop
			}
			if _, err := io.WriteString(tw, entry.contents); err != nil {
				t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
				continue testLoop
			}
		}
		// Only interested in Close failures for the small tests.
		if err := tw.Close(); err != nil && !big {
			t.Errorf("test %d: Failed closing archive: %v", i, err)
			continue testLoop
		}

		actual := buf.Bytes()
		if !bytes.Equal(expected, actual) {
			t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
				i, bytediff(expected, actual))
		}
		if testing.Short() { // The second test is expensive.
			break
		}
	}
}

func TestPax(t *testing.T) {
	// Create an archive with a large name
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	// Force a PAX long name to be written
	longName := strings.Repeat("ab", 100)
	contents := strings.Repeat(" ", int(hdr.Size))
	hdr.Name = longName
	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if _, err = writer.Write([]byte(contents)); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Simple test to make sure PAX extensions are in effect
	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
		t.Fatal("Expected at least one PAX header to be written.")
	}
	// Test that we can get a long name back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Name != longName {
		t.Fatal("Couldn't recover long file name")
	}
}

func TestPaxSymlink(t *testing.T) {
	// Create an archive with a large linkname
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	hdr.Typeflag = TypeSymlink
	// Force a PAX long linkname to be written
	longLinkname := strings.Repeat("1234567890/1234567890", 10)
	hdr.Linkname = longLinkname

	hdr.Size = 0
	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Simple test to make sure PAX extensions are in effect
	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
		t.Fatal("Expected at least one PAX header to be written.")
	}
	// Test that we can get the long link name back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Linkname != longLinkname {
		t.Fatal("Couldn't recover long link name")
	}
}

func TestPaxNonAscii(t *testing.T) {
	// Create an archive with non-ASCII names. These should trigger PAX headers
	// because PAX headers have a defined UTF-8 encoding.
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}

	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}

	// some sample data
	chineseFilename := "文件名"
	chineseGroupname := "組"
	chineseUsername := "用戶名"

	hdr.Name = chineseFilename
	hdr.Gname = chineseGroupname
	hdr.Uname = chineseUsername

	contents := strings.Repeat(" ", int(hdr.Size))

	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if _, err = writer.Write([]byte(contents)); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Simple test to make sure PAX extensions are in effect
	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
		t.Fatal("Expected at least one PAX header to be written.")
	}
	// Test that we can get the unicode names back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Name != chineseFilename {
		t.Fatal("Couldn't recover unicode name")
	}
	if hdr.Gname != chineseGroupname {
		t.Fatal("Couldn't recover unicode group")
	}
	if hdr.Uname != chineseUsername {
		t.Fatal("Couldn't recover unicode user")
	}
}

func TestPaxXattrs(t *testing.T) {
	xattrs := map[string]string{
		"user.key": "value",
	}

	// Create an archive with an xattr
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	contents := "Kilts"
	hdr.Xattrs = xattrs
	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if _, err = writer.Write([]byte(contents)); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Test that we can get the xattrs back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
		t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
			hdr.Xattrs, xattrs)
	}
}

func TestPaxHeadersSorted(t *testing.T) {
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	contents := strings.Repeat(" ", int(hdr.Size))

	hdr.Xattrs = map[string]string{
		"foo": "foo",
		"bar": "bar",
		"baz": "baz",
		"qux": "qux",
	}

	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if _, err = writer.Write([]byte(contents)); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Simple test to make sure PAX extensions are in effect
	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
		t.Fatal("Expected at least one PAX header to be written.")
	}

	// The xattr records should appear in sorted key order:
	// bar before baz before foo before qux.
	indices := []int{
		bytes.Index(buf.Bytes(), []byte("bar=bar")),
		bytes.Index(buf.Bytes(), []byte("baz=baz")),
		bytes.Index(buf.Bytes(), []byte("foo=foo")),
		bytes.Index(buf.Bytes(), []byte("qux=qux")),
	}
	if !sort.IntsAreSorted(indices) {
		t.Fatal("PAX headers are not sorted")
	}
}

func TestUSTARLongName(t *testing.T) {
	// Create an archive with a path that failed to split with USTAR extension in previous versions.
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	hdr.Typeflag = TypeDir
	// Force a PAX long name to be written. The name was taken from a practical
	// example that failed, with every character replaced by a digit to
	// anonymize the sample.
	longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
	hdr.Name = longName

	hdr.Size = 0
	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Test that we can get the long name back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Name != longName {
		t.Fatal("Couldn't recover long name")
	}
}

func TestValidTypeflagWithPAXHeader(t *testing.T) {
	var buffer bytes.Buffer
	tw := NewWriter(&buffer)

	fileName := strings.Repeat("ab", 100)

	hdr := &Header{
		Name:     fileName,
		Size:     4,
		Typeflag: 0,
	}
	if err := tw.WriteHeader(hdr); err != nil {
		t.Fatalf("Failed to write header: %s", err)
	}
	if _, err := tw.Write([]byte("fooo")); err != nil {
		t.Fatalf("Failed to write the file's data: %s", err)
	}
	tw.Close()

	tr := NewReader(&buffer)

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatalf("Failed to read header: %s", err)
		}
		if header.Typeflag != 0 {
			t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
		}
	}
}

func TestWriteAfterClose(t *testing.T) {
	var buffer bytes.Buffer
	tw := NewWriter(&buffer)

	hdr := &Header{
		Name: "small.txt",
		Size: 5,
	}
	if err := tw.WriteHeader(hdr); err != nil {
		t.Fatalf("Failed to write header: %s", err)
	}
	tw.Close()
	if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
		t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
	}
}

func TestSplitUSTARPath(t *testing.T) {
	var sr = strings.Repeat

	var vectors = []struct {
		input  string // Input path
		prefix string // Expected output prefix
		suffix string // Expected output suffix
		ok     bool   // Split success?
	}{
		{"", "", "", false},
		{"abc", "", "", false},
		{"用戶名", "", "", false},
		{sr("a", fileNameSize), "", "", false},
		{sr("a", fileNameSize) + "/", "", "", false},
		{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
		{sr("a", fileNamePrefixSize) + "/", "", "", false},
		{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
		{sr("a", fileNameSize+1), "", "", false},
		{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
		{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
			sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
		{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
		{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
	}

	for _, v := range vectors {
		prefix, suffix, ok := splitUSTARPath(v.input)
		if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
			t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
				v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
		}
	}
}

func TestFormatPAXRecord(t *testing.T) {
	var medName = strings.Repeat("CD", 50)
	var longName = strings.Repeat("AB", 100)

	var vectors = []struct {
		inputKey string
		inputVal string
		output   string
	}{
		{"k", "v", "6 k=v\n"},
		{"path", "/etc/hosts", "19 path=/etc/hosts\n"},
		{"path", longName, "210 path=" + longName + "\n"},
		{"path", medName, "110 path=" + medName + "\n"},
		{"foo", "ba", "9 foo=ba\n"},
		{"foo", "bar", "11 foo=bar\n"},
		{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
		{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
		{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
		{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
	}

	for _, v := range vectors {
		output := formatPAXRecord(v.inputKey, v.inputVal)
		if output != v.output {
			t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
				v.inputKey, v.inputVal, output, v.output)
		}
	}
}

func TestFitsInBase256(t *testing.T) {
	var vectors = []struct {
		input int64
		width int
		ok    bool
	}{
		{+1, 8, true},
		{0, 8, true},
		{-1, 8, true},
		{1 << 56, 8, false},
		{(1 << 56) - 1, 8, true},
		{-1 << 56, 8, true},
		{(-1 << 56) - 1, 8, false},
		{121654, 8, true},
		{-9849849, 8, true},
		{math.MaxInt64, 9, true},
		{0, 9, true},
		{math.MinInt64, 9, true},
		{math.MaxInt64, 12, true},
		{0, 12, true},
		{math.MinInt64, 12, true},
	}

	for _, v := range vectors {
		ok := fitsInBase256(v.width, v.input)
		if ok != v.ok {
			t.Errorf("fitsInBase256(%d, %d): got %v, want %v", v.width, v.input, ok, v.ok)
		}
	}
}

func TestFormatNumeric(t *testing.T) {
	var vectors = []struct {
		input  int64
		output string
		ok     bool
	}{
		// Test base-256 (binary) encoded values.
		{-1, "\xff", true},
		{-1, "\xff\xff", true},
		{-1, "\xff\xff\xff", true},
		{(1 << 0), "0", false},
		{(1 << 8) - 1, "\x80\xff", true},
		{(1 << 8), "0\x00", false},
		{(1 << 16) - 1, "\x80\xff\xff", true},
		{(1 << 16), "00\x00", false},
		{-1 * (1 << 0), "\xff", true},
		{-1*(1<<0) - 1, "0", false},
		{-1 * (1 << 8), "\xff\x00", true},
		{-1*(1<<8) - 1, "0\x00", false},
		{-1 * (1 << 16), "\xff\x00\x00", true},
		{-1*(1<<16) - 1, "00\x00", false},
		{537795476381659745, "0000000\x00", false},
		{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
		{-615126028225187231, "0000000\x00", false},
		{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
		{math.MaxInt64, "0000000\x00", false},
		{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
		{math.MinInt64, "0000000\x00", false},
		{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
		{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
		{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
	}

	for _, v := range vectors {
		var f formatter
		output := make([]byte, len(v.output))
		f.formatNumeric(output, v.input)
		ok := (f.err == nil)
		if ok != v.ok {
			if v.ok {
				t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input)
			} else {
				t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input)
			}
		}
		if string(output) != v.output {
			t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output)
		}
	}
}

func TestFormatPAXTime(t *testing.T) {
	t1 := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC)
	t2 := time.Date(2000, 1, 1, 11, 0, 0, 100, time.UTC)
	t3 := time.Date(1960, 1, 1, 11, 0, 0, 0, time.UTC)
	t4 := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
	verify := func(ts time.Time, s string) {
		p := formatPAXTime(ts)
		if p != s {
			t.Errorf("for %v, expected %s, got %s", ts, s, p)
		}
	}
	verify(t1, "946724400")
	verify(t2, "946724400.000000100")
	verify(t3, "-315579600")
	verify(t4, "0")
}
4
vendor/github.com/Microsoft/go-winio/backuptar/noop.go
generated
vendored
Normal file
@@ -0,0 +1,4 @@
// +build !windows
// This file only exists to allow go get on non-Windows platforms.

package backuptar
439
vendor/github.com/Microsoft/go-winio/backuptar/tar.go
generated
vendored
Normal file
@@ -0,0 +1,439 @@
// +build windows

package backuptar

import (
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
)

const (
	c_ISUID  = 04000   // Set uid
	c_ISGID  = 02000   // Set gid
	c_ISVTX  = 01000   // Save text (sticky bit)
	c_ISDIR  = 040000  // Directory
	c_ISFIFO = 010000  // FIFO
	c_ISREG  = 0100000 // Regular file
	c_ISLNK  = 0120000 // Symbolic link
	c_ISBLK  = 060000  // Block special file
	c_ISCHR  = 020000  // Character special file
	c_ISSOCK = 0140000 // Socket
)

const (
	hdrFileAttributes        = "fileattr"
	hdrSecurityDescriptor    = "sd"
	hdrRawSecurityDescriptor = "rawsd"
	hdrMountPoint            = "mountpoint"
	hdrEaPrefix              = "xattr."
)

func writeZeroes(w io.Writer, count int64) error {
	buf := make([]byte, 8192)
	c := len(buf)
	for i := int64(0); i < count; i += int64(c) {
		if int64(c) > count-i {
			c = int(count - i)
		}
		_, err := w.Write(buf[:c])
		if err != nil {
			return err
		}
	}
	return nil
}

func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
	curOffset := int64(0)
	for {
		bhdr, err := br.Next()
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		if err != nil {
			return err
		}
		if bhdr.Id != winio.BackupSparseBlock {
			return fmt.Errorf("unexpected stream %d", bhdr.Id)
		}

		// archive/tar does not support writing sparse files
		// so just write zeroes to catch up to the current offset.
		if err = writeZeroes(t, bhdr.Offset-curOffset); err != nil {
			return err
		}
		if bhdr.Size == 0 {
			break
		}
		n, err := io.Copy(t, br)
		if err != nil {
			return err
		}
		curOffset = bhdr.Offset + n
	}
	return nil
}

// BasicInfoHeader creates a tar header from basic file information.
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
	hdr := &tar.Header{
		Name:         filepath.ToSlash(name),
		Size:         size,
		Typeflag:     tar.TypeReg,
		ModTime:      time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
		ChangeTime:   time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
		AccessTime:   time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
		CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
		Winheaders:   make(map[string]string),
	}
	hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)

	if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
		hdr.Mode |= c_ISDIR
		hdr.Size = 0
		hdr.Typeflag = tar.TypeDir
	}
	return hdr
}

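// exampleBasicInfoHeader is an illustrative sketch, not part of the original
// file; the function name is hypothetical. On Windows the winio.FileBasicInfo
// argument would typically come from winio.GetFileBasicInfo, as in the round
// trip test later in this package.
func exampleBasicInfoHeader(fi *winio.FileBasicInfo) {
	hdr := BasicInfoHeader("dir\\file.txt", 42, fi)
	_ = hdr.Name                          // "dir/file.txt": backslashes converted to forward slashes
	_ = hdr.Winheaders[hdrFileAttributes] // Win32 attribute bits rendered as a decimal string
}
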
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
//
// The additional Win32 metadata is:
//
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
//
// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
//
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
	name = filepath.ToSlash(name)
	hdr := BasicInfoHeader(name, size, fileInfo)

	// If r can be seeked, then this function is two-pass: pass 1 collects the
	// tar header data, and pass 2 copies the data stream. If r cannot be
	// seeked, then some header data (in particular EAs) will be silently lost.
	var (
		restartPos int64
		err        error
	)
	sr, readTwice := r.(io.Seeker)
	if readTwice {
		if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
			readTwice = false
		}
	}

	br := winio.NewBackupStreamReader(r)
	var dataHdr *winio.BackupHeader
	for dataHdr == nil {
		bhdr, err := br.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		switch bhdr.Id {
		case winio.BackupData:
			hdr.Mode |= c_ISREG
			if !readTwice {
				dataHdr = bhdr
			}
		case winio.BackupSecurity:
			sd, err := ioutil.ReadAll(br)
			if err != nil {
				return err
			}
			hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)

		case winio.BackupReparseData:
			hdr.Mode |= c_ISLNK
			hdr.Typeflag = tar.TypeSymlink
			reparseBuffer, err := ioutil.ReadAll(br)
			if err != nil {
				return err
			}
			rp, err := winio.DecodeReparsePoint(reparseBuffer)
			if err != nil {
				return err
			}
			if rp.IsMountPoint {
				hdr.Winheaders[hdrMountPoint] = "1"
			}
			hdr.Linkname = rp.Target

		case winio.BackupEaData:
			eab, err := ioutil.ReadAll(br)
			if err != nil {
				return err
			}
			eas, err := winio.DecodeExtendedAttributes(eab)
			if err != nil {
				return err
			}
			for _, ea := range eas {
				// Use base64 encoding for the binary value. Note that there
				// is no way to encode the EA's flags, since their use doesn't
				// make any sense for persisted EAs.
				hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
			}

		case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
			// ignore these streams
		default:
			return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
		}
	}

	err = t.WriteHeader(hdr)
	if err != nil {
		return err
	}

	if readTwice {
		// Get back to the data stream.
		if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
			return err
		}
		for dataHdr == nil {
			bhdr, err := br.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				return err
			}
			if bhdr.Id == winio.BackupData {
				dataHdr = bhdr
			}
		}
	}

	if dataHdr != nil {
		// A data stream was found. Copy the data.
		if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
			if size != dataHdr.Size {
				return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
			}
			_, err = io.Copy(t, br)
			if err != nil {
				return err
			}
		} else {
			err = copySparse(t, br)
			if err != nil {
				return err
			}
		}
	}

	// Look for streams after the data stream. The only ones we handle are alternate data streams.
	// Other streams may have metadata that could be serialized, but the tar header has already
	// been written. In practice, this means that we don't get EA or TXF metadata.
	for {
		bhdr, err := br.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		switch bhdr.Id {
		case winio.BackupAlternateData:
			altName := bhdr.Name
			if strings.HasSuffix(altName, ":$DATA") {
				altName = altName[:len(altName)-len(":$DATA")]
			}
			if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
				hdr = &tar.Header{
					Name:       name + altName,
					Mode:       hdr.Mode,
					Typeflag:   tar.TypeReg,
					Size:       bhdr.Size,
					ModTime:    hdr.ModTime,
					AccessTime: hdr.AccessTime,
					ChangeTime: hdr.ChangeTime,
				}
				err = t.WriteHeader(hdr)
				if err != nil {
					return err
				}
				_, err = io.Copy(t, br)
				if err != nil {
					return err
				}

			} else {
				// Unsupported for now, since the size of the alternate stream is not present
				// in the backup stream until after the data has been read.
				return errors.New("tar of sparse alternate data streams is unsupported")
			}
		case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
			// ignore these streams
		default:
			return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
		}
	}
	return nil
}

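// exampleWinheaders is an illustrative sketch, not part of the original file;
// the function name is hypothetical and the values in the comments are
// assumptions for illustration only. It shows where the vendor metadata
// described above lands on a header produced by WriteTarFileFromBackupStream.
func exampleWinheaders(hdr *tar.Header) {
	_ = hdr.Winheaders[hdrFileAttributes]        // e.g. "32" (FILE_ATTRIBUTE_ARCHIVE)
	_ = hdr.Winheaders[hdrRawSecurityDescriptor] // base64-encoded binary security descriptor
	// Each Winheaders key is emitted into the archive as a PAX record under
	// the MSWINDOWS vendor prefix (see the paxWindows handling in writer.go).
}
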
// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
// WriteTarFileFromBackupStream.
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
	name = hdr.Name
	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
		size = hdr.Size
	}
	fileInfo = &winio.FileBasicInfo{
		LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
		LastWriteTime:  syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
		ChangeTime:     syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
		CreationTime:   syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
	}
	if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
		attr, err := strconv.ParseUint(attrStr, 10, 32)
		if err != nil {
			return "", 0, nil, err
		}
		fileInfo.FileAttributes = uintptr(attr)
	} else {
		if hdr.Typeflag == tar.TypeDir {
			fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
		}
	}
	return
}

// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar file header that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
	bw := winio.NewBackupStreamWriter(w)
	var sd []byte
	var err error
	// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
	// by this library will have raw binary for the security descriptor.
	if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
		sd, err = winio.SddlToSecurityDescriptor(sddl)
		if err != nil {
			return nil, err
		}
	}
	if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
		sd, err = base64.StdEncoding.DecodeString(sdraw)
		if err != nil {
			return nil, err
		}
	}
	if len(sd) != 0 {
		bhdr := winio.BackupHeader{
			Id:   winio.BackupSecurity,
			Size: int64(len(sd)),
		}
		err := bw.WriteHeader(&bhdr)
		if err != nil {
			return nil, err
		}
		_, err = bw.Write(sd)
		if err != nil {
			return nil, err
		}
	}
	var eas []winio.ExtendedAttribute
	for k, v := range hdr.Winheaders {
		if !strings.HasPrefix(k, hdrEaPrefix) {
			continue
		}
		data, err := base64.StdEncoding.DecodeString(v)
		if err != nil {
			return nil, err
		}
		eas = append(eas, winio.ExtendedAttribute{
			Name:  k[len(hdrEaPrefix):],
			Value: data,
		})
	}
	if len(eas) != 0 {
		eadata, err := winio.EncodeExtendedAttributes(eas)
		if err != nil {
			return nil, err
		}
		bhdr := winio.BackupHeader{
			Id:   winio.BackupEaData,
			Size: int64(len(eadata)),
		}
		err = bw.WriteHeader(&bhdr)
		if err != nil {
			return nil, err
		}
		_, err = bw.Write(eadata)
		if err != nil {
			return nil, err
		}
	}
	if hdr.Typeflag == tar.TypeSymlink {
		_, isMountPoint := hdr.Winheaders[hdrMountPoint]
		rp := winio.ReparsePoint{
			Target:       filepath.FromSlash(hdr.Linkname),
			IsMountPoint: isMountPoint,
		}
		reparse := winio.EncodeReparsePoint(&rp)
		bhdr := winio.BackupHeader{
			Id:   winio.BackupReparseData,
			Size: int64(len(reparse)),
		}
		err := bw.WriteHeader(&bhdr)
		if err != nil {
			return nil, err
		}
		_, err = bw.Write(reparse)
		if err != nil {
			return nil, err
		}
	}
	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
		bhdr := winio.BackupHeader{
			Id:   winio.BackupData,
			Size: hdr.Size,
		}
		err := bw.WriteHeader(&bhdr)
		if err != nil {
			return nil, err
		}
		_, err = io.Copy(bw, t)
		if err != nil {
			return nil, err
		}
	}
	// Copy all the alternate data streams and return the next non-ADS header.
	for {
		ahdr, err := t.Next()
		if err != nil {
			return nil, err
		}
		if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
			return ahdr, nil
		}
		bhdr := winio.BackupHeader{
			Id:   winio.BackupAlternateData,
			Size: ahdr.Size,
			Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
		}
		err = bw.WriteHeader(&bhdr)
		if err != nil {
			return nil, err
		}
		_, err = io.Copy(bw, t)
		if err != nil {
			return nil, err
		}
	}
}
84
vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
package backuptar

import (
	"bytes"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"testing"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/archive/tar"
)

func ensurePresent(t *testing.T, m map[string]string, keys ...string) {
	for _, k := range keys {
		if _, ok := m[k]; !ok {
			t.Error(k, "not present in tar header")
		}
	}
}

func TestRoundTrip(t *testing.T) {
	f, err := ioutil.TempFile("", "tst")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	defer os.Remove(f.Name())

	if _, err = f.Write([]byte("testing 1 2 3\n")); err != nil {
		t.Fatal(err)
	}

	if _, err = f.Seek(0, 0); err != nil {
		t.Fatal(err)
	}

	fi, err := f.Stat()
	if err != nil {
		t.Fatal(err)
	}

	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		t.Fatal(err)
	}

	br := winio.NewBackupFileReader(f, true)
	defer br.Close()

	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	err = WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi)
	if err != nil {
		t.Fatal(err)
	}

	tr := tar.NewReader(&buf)
	hdr, err := tr.Next()
	if err != nil {
		t.Fatal(err)
	}

	name, size, bi2, err := FileInfoFromHeader(hdr)
	if err != nil {
		t.Fatal(err)
	}

	if name != filepath.ToSlash(f.Name()) {
		t.Errorf("got name %s, expected %s", name, filepath.ToSlash(f.Name()))
	}

	if size != fi.Size() {
		t.Errorf("got size %d, expected %d", size, fi.Size())
	}

	if !reflect.DeepEqual(*bi, *bi2) {
		t.Errorf("got %#v, expected %#v", *bi, *bi2)
	}

	ensurePresent(t, hdr.Winheaders, "fileattr", "rawsd")
}
3
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
@@ -16,7 +16,6 @@ import (
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod

type atomicBool int32

@@ -153,8 +152,6 @@ func (f *win32File) prepareIo() (*ioOperation, error) {

// ioCompletionProcessor processes completed async IOs forever
func ioCompletionProcessor(h syscall.Handle) {
	// Set the timer resolution to 1. This fixes a performance regression in golang 1.6.
	timeBeginPeriod(1)
	for {
		var bytes uint32
		var key uintptr

64
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
@@ -22,6 +22,7 @@ import (
const (
	cERROR_PIPE_BUSY      = syscall.Errno(231)
	cERROR_NO_DATA        = syscall.Errno(232)
	cERROR_PIPE_CONNECTED = syscall.Errno(535)
	cERROR_SEM_TIMEOUT    = syscall.Errno(121)

@@ -254,6 +255,36 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
	return f, nil
}

func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
	p, err := l.makeServerPipe()
	if err != nil {
		return nil, err
	}

	// Wait for the client to connect.
	ch := make(chan error)
	go func(p *win32File) {
		ch <- connectPipe(p)
	}(p)

	select {
	case err = <-ch:
		if err != nil {
			p.Close()
			p = nil
		}
	case <-l.closeCh:
		// Abort the connect request by closing the handle.
		p.Close()
		p = nil
		err = <-ch
		if err == nil || err == ErrFileClosed {
			err = ErrPipeListenerClosed
		}
	}
	return p, err
}

func (l *win32PipeListener) listenerRoutine() {
	closed := false
	for !closed {
@@ -261,31 +292,20 @@ func (l *win32PipeListener) listenerRoutine() {
		case <-l.closeCh:
			closed = true
		case responseCh := <-l.acceptCh:
			p, err := l.makeServerPipe()
			if err == nil {
				// Wait for the client to connect.
				ch := make(chan error)
				go func(p *win32File) {
					ch <- connectPipe(p)
				}(p)
				select {
				case err = <-ch:
					if err != nil {
						p.Close()
						p = nil
					}
				case <-l.closeCh:
					// Abort the connect request by closing the handle.
					p.Close()
					p = nil
					err = <-ch
					if err == nil || err == ErrFileClosed {
						err = ErrPipeListenerClosed
					}
					closed = true
			var (
				p   *win32File
				err error
			)
			for {
				p, err = l.makeConnectedServerPipe()
				// If the connection was immediately closed by the client, try
				// again.
				if err != cERROR_NO_DATA {
					break
				}
			}
			responseCh <- acceptResponse{p, err}
			closed = err == ErrPipeListenerClosed
		}
	}
	syscall.Close(l.firstHandle)

29
vendor/github.com/Microsoft/go-winio/pipe_test.go
generated
vendored
@@ -422,3 +422,32 @@ func TestEchoWithMessaging(t *testing.T) {
	<-listenerDone
	<-clientDone
}

func TestConnectRace(t *testing.T) {
	l, err := ListenPipe(testPipeName, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer l.Close()
	go func() {
		for {
			s, err := l.Accept()
			if err == ErrPipeListenerClosed {
				return
			}

			if err != nil {
				t.Fatal(err)
			}
			s.Close()
		}
	}()

	for i := 0; i < 1000; i++ {
		c, err := DialPipe(testPipeName, nil)
		if err != nil {
			t.Fatal(err)
		}
		c.Close()
	}
}

901
vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go
generated
vendored
Normal file
@@ -0,0 +1,901 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Hard-coding unicode mode for VHD library.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
mksyscall_windows generates windows system call bodies
|
||||
|
||||
It parses all files specified on command line containing function
|
||||
prototypes (like syscall_windows.go) and prints system call bodies
|
||||
to standard output.
|
||||
|
||||
The prototypes are marked by lines beginning with "//sys" and read
|
||||
like func declarations if //sys is replaced by func, but:
|
||||
|
||||
* The parameter lists must give a name for each argument. This
|
||||
includes return parameters.
|
||||
|
||||
* The parameter lists must give a type for each argument:
|
||||
the (x, y, z int) shorthand is not allowed.
|
||||
|
||||
* If the return parameter is an error number, it must be named err.
|
||||
|
||||
* If go func name needs to be different from it's winapi dll name,
|
||||
the winapi name could be specified at the end, after "=" sign, like
|
||||
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
|
||||
|
||||
* Each function that returns err needs to supply a condition, that
|
||||
return value of winapi will be tested against to detect failure.
|
||||
This would set err to windows "last-error", otherwise it will be nil.
|
||||
The value can be provided at end of //sys declaration, like
|
||||
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
|
||||
and is [failretval==0] by default.
|
||||
|
||||
Usage:
|
||||
mksyscall_windows [flags] [path ...]
|
||||
|
||||
The flags are:
|
||||
-output
|
||||
Specify output file name (outputs to console if blank).
|
||||
-trace
|
||||
Generate print statement after every syscall.
|
||||
*/
package main

import (
	"bufio"
	"bytes"
	"errors"
	"flag"
	"fmt"
	"go/format"
	"go/parser"
	"go/token"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"text/template"
)

var (
	filename       = flag.String("output", "", "output file name (standard output if omitted)")
	printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
	systemDLL      = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
)

func trim(s string) string {
	return strings.Trim(s, " \t")
}

var packageName string

func packagename() string {
	return packageName
}

func syscalldot() string {
	if packageName == "syscall" {
		return ""
	}
	return "syscall."
}

// Param is a function parameter.
type Param struct {
	Name      string
	Type      string
	fn        *Fn
	tmpVarIdx int
}

// tmpVar returns temp variable name that will be used to represent p during syscall.
func (p *Param) tmpVar() string {
	if p.tmpVarIdx < 0 {
		p.tmpVarIdx = p.fn.curTmpVarIdx
		p.fn.curTmpVarIdx++
	}
	return fmt.Sprintf("_p%d", p.tmpVarIdx)
}

// BoolTmpVarCode returns source code for bool temp variable.
func (p *Param) BoolTmpVarCode() string {
	const code = `var %s uint32
	if %s {
		%s = 1
	} else {
		%s = 0
	}`
	tmp := p.tmpVar()
	return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
}

// SliceTmpVarCode returns source code for slice temp variable.
func (p *Param) SliceTmpVarCode() string {
	const code = `var %s *%s
	if len(%s) > 0 {
		%s = &%s[0]
	}`
	tmp := p.tmpVar()
	return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
}

// StringTmpVarCode returns source code for string temp variable.
func (p *Param) StringTmpVarCode() string {
	errvar := p.fn.Rets.ErrorVarName()
	if errvar == "" {
		errvar = "_"
	}
	tmp := p.tmpVar()
	const code = `var %s %s
	%s, %s = %s(%s)`
	s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
	if errvar == "-" {
		return s
	}
	const morecode = `
	if %s != nil {
		return
	}`
	return s + fmt.Sprintf(morecode, errvar)
}

// TmpVarCode returns source code for temp variable.
func (p *Param) TmpVarCode() string {
	switch {
	case p.Type == "bool":
		return p.BoolTmpVarCode()
	case strings.HasPrefix(p.Type, "[]"):
		return p.SliceTmpVarCode()
	default:
		return ""
	}
}

// TmpVarHelperCode returns source code for helper's temp variable.
func (p *Param) TmpVarHelperCode() string {
	if p.Type != "string" {
		return ""
	}
	return p.StringTmpVarCode()
}

// SyscallArgList returns source code fragments representing p parameter
// in syscall. Slices are translated into 2 syscall parameters: pointer to
// the first element and length.
func (p *Param) SyscallArgList() []string {
	t := p.HelperType()
	var s string
	switch {
	case t[0] == '*':
		s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
	case t == "bool":
		s = p.tmpVar()
	case strings.HasPrefix(t, "[]"):
		return []string{
			fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
			fmt.Sprintf("uintptr(len(%s))", p.Name),
		}
	default:
		s = p.Name
	}
	return []string{fmt.Sprintf("uintptr(%s)", s)}
}
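// Example (illustrative sketch, not part of the upstream file): a parameter
// declared as `b []byte` expands to the two syscall arguments
//
//	uintptr(unsafe.Pointer(_p0)), uintptr(len(b))
//
// where _p0 is the temp pointer emitted by SliceTmpVarCode, so ParamCount
// counts such a parameter twice.
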
// IsError determines if p parameter is used to return error.
func (p *Param) IsError() bool {
	return p.Name == "err" && p.Type == "error"
}

// HelperType returns type of parameter p used in helper function.
func (p *Param) HelperType() string {
	if p.Type == "string" {
		return p.fn.StrconvType()
	}
	return p.Type
}

// join concatenates parameters ps into a string with sep separator.
// Each parameter is converted into a string by applying fn to it
// before joining.
func join(ps []*Param, fn func(*Param) string, sep string) string {
	if len(ps) == 0 {
		return ""
	}
	a := make([]string, 0)
	for _, p := range ps {
		a = append(a, fn(p))
	}
	return strings.Join(a, sep)
}

// Rets describes function return parameters.
type Rets struct {
	Name         string
	Type         string
	ReturnsError bool
	FailCond     string
}

// ErrorVarName returns error variable name for r.
func (r *Rets) ErrorVarName() string {
	if r.ReturnsError {
		return "err"
	}
	if r.Type == "error" {
		return r.Name
	}
	return ""
}

// ToParams converts r into slice of *Param.
func (r *Rets) ToParams() []*Param {
	ps := make([]*Param, 0)
	if len(r.Name) > 0 {
		ps = append(ps, &Param{Name: r.Name, Type: r.Type})
	}
	if r.ReturnsError {
		ps = append(ps, &Param{Name: "err", Type: "error"})
	}
	return ps
}

// List returns source code of syscall return parameters.
func (r *Rets) List() string {
	s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
	if len(s) > 0 {
		s = "(" + s + ")"
	}
	return s
}

// PrintList returns source code of the trace printing part corresponding
// to syscall return values.
func (r *Rets) PrintList() string {
	return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}

// SetReturnValuesCode returns source code that accepts syscall return values.
func (r *Rets) SetReturnValuesCode() string {
	if r.Name == "" && !r.ReturnsError {
		return ""
	}
	retvar := "r0"
	if r.Name == "" {
		retvar = "r1"
	}
	errvar := "_"
	if r.ReturnsError {
		errvar = "e1"
	}
	return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
}

func (r *Rets) useLongHandleErrorCode(retvar string) string {
	const code = `if %s {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = %sEINVAL
		}
	}`
	cond := retvar + " == 0"
	if r.FailCond != "" {
		cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
	}
	return fmt.Sprintf(code, cond, syscalldot())
}

// SetErrorCode returns source code that sets return parameters.
func (r *Rets) SetErrorCode() string {
	const code = `if r0 != 0 {
		%s = %sErrno(r0)
	}`
	if r.Name == "" && !r.ReturnsError {
		return ""
	}
	if r.Name == "" {
		return r.useLongHandleErrorCode("r1")
	}
	if r.Type == "error" {
		return fmt.Sprintf(code, r.Name, syscalldot())
	}
	s := ""
	switch {
	case r.Type[0] == '*':
		s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
	case r.Type == "bool":
		s = fmt.Sprintf("%s = r0 != 0", r.Name)
	default:
		s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
	}
	if !r.ReturnsError {
		return s
	}
	return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
}
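// Example (illustrative sketch, not part of the upstream file): for a
// declaration ending in `(err error) [failretval==-1]`, the generated check
// is roughly
//
//	if r1 == -1 {
//		if e1 != 0 {
//			err = errnoErr(e1)
//		} else {
//			err = syscall.EINVAL
//		}
//	}
//
// i.e. the fail condition from the //sys line guards the last-error lookup.
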
// Fn describes syscall function.
type Fn struct {
	Name        string
	Params      []*Param
	Rets        *Rets
	PrintTrace  bool
	dllname     string
	dllfuncname string
	src         string
	// TODO: get rid of this field and just use parameter index instead
	curTmpVarIdx int // ensure temp variables have unique names
}

// extractParams parses s to extract function parameters.
func extractParams(s string, f *Fn) ([]*Param, error) {
	s = trim(s)
	if s == "" {
		return nil, nil
	}
	a := strings.Split(s, ",")
	ps := make([]*Param, len(a))
	for i := range ps {
		s2 := trim(a[i])
		b := strings.Split(s2, " ")
		if len(b) != 2 {
			b = strings.Split(s2, "\t")
			if len(b) != 2 {
				return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
			}
		}
		ps[i] = &Param{
			Name:      trim(b[0]),
			Type:      trim(b[1]),
			fn:        f,
			tmpVarIdx: -1,
		}
	}
	return ps, nil
}

// extractSection extracts text out of string s starting after start
// and ending just before end. The found return value indicates success,
// and prefix, body and suffix contain the corresponding parts of string s.
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
	s = trim(s)
	if strings.HasPrefix(s, string(start)) {
		// no prefix
		body = s[1:]
	} else {
		a := strings.SplitN(s, string(start), 2)
		if len(a) != 2 {
			return "", "", s, false
		}
		prefix = a[0]
		body = a[1]
	}
	a := strings.SplitN(body, string(end), 2)
	if len(a) != 2 {
		return "", "", "", false
	}
	return prefix, a[0], a[1], true
}

// newFn parses string s and returns the created function Fn.
func newFn(s string) (*Fn, error) {
	s = trim(s)
	f := &Fn{
		Rets:       &Rets{},
		src:        s,
		PrintTrace: *printTraceFlag,
	}
	// function name and args
	prefix, body, s, found := extractSection(s, '(', ')')
	if !found || prefix == "" {
		return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
	}
	f.Name = prefix
	var err error
	f.Params, err = extractParams(body, f)
	if err != nil {
		return nil, err
	}
	// return values
	_, body, s, found = extractSection(s, '(', ')')
	if found {
		r, err := extractParams(body, f)
		if err != nil {
			return nil, err
		}
		switch len(r) {
		case 0:
		case 1:
			if r[0].IsError() {
				f.Rets.ReturnsError = true
			} else {
				f.Rets.Name = r[0].Name
				f.Rets.Type = r[0].Type
			}
		case 2:
			if !r[1].IsError() {
				return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
			}
			f.Rets.ReturnsError = true
			f.Rets.Name = r[0].Name
			f.Rets.Type = r[0].Type
		default:
			return nil, errors.New("Too many return values in \"" + f.src + "\"")
		}
	}
	// fail condition
	_, body, s, found = extractSection(s, '[', ']')
	if found {
		f.Rets.FailCond = body
	}
	// dll and dll function names
	s = trim(s)
	if s == "" {
		return f, nil
	}
	if !strings.HasPrefix(s, "=") {
		return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
	}
	s = trim(s[1:])
	a := strings.Split(s, ".")
	switch len(a) {
	case 1:
		f.dllfuncname = a[0]
	case 2:
		f.dllname = a[0]
		f.dllfuncname = a[1]
	default:
		return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
	}
	return f, nil
}

// DLLName returns DLL name for function f.
func (f *Fn) DLLName() string {
	if f.dllname == "" {
		return "kernel32"
	}
	return f.dllname
}

// DLLFuncName returns DLL function name for function f.
func (f *Fn) DLLFuncName() string {
	if f.dllfuncname == "" {
		return f.Name
	}
	return f.dllfuncname
}

// ParamList returns source code for function f parameters.
func (f *Fn) ParamList() string {
	return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
}

// HelperParamList returns source code for helper function f parameters.
func (f *Fn) HelperParamList() string {
	return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
}

// ParamPrintList returns source code of the trace printing part corresponding
// to syscall input parameters.
func (f *Fn) ParamPrintList() string {
	return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}

// ParamCount returns the number of syscall parameters for function f.
func (f *Fn) ParamCount() int {
	n := 0
	for _, p := range f.Params {
		n += len(p.SyscallArgList())
	}
	return n
}

// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
// to use. It returns the parameter count for the corresponding SyscallX function.
func (f *Fn) SyscallParamCount() int {
	n := f.ParamCount()
	switch {
	case n <= 3:
		return 3
	case n <= 6:
		return 6
	case n <= 9:
		return 9
	case n <= 12:
		return 12
	case n <= 15:
		return 15
	default:
		panic("too many arguments to system call")
	}
}

// Syscall determines which SyscallX function to use for function f.
func (f *Fn) Syscall() string {
	c := f.SyscallParamCount()
	if c == 3 {
		return syscalldot() + "Syscall"
	}
	return syscalldot() + "Syscall" + strconv.Itoa(c)
}
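// Example (illustrative sketch, not part of the upstream file): a function
// whose parameters flatten to 5 words gets SyscallParamCount() == 6, so the
// template emits (procFoo is a hypothetical proc handle)
//
//	r1, _, e1 := syscall.Syscall6(procFoo.Addr(), 5, a1, a2, a3, a4, a5, 0)
//
// with SyscallParamList padding the unused trailing slot with 0 while the
// second argument stays the true count from ParamCount.
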
// SyscallParamList returns source code for SyscallX parameters for function f.
func (f *Fn) SyscallParamList() string {
	a := make([]string, 0)
	for _, p := range f.Params {
		a = append(a, p.SyscallArgList()...)
	}
	for len(a) < f.SyscallParamCount() {
		a = append(a, "0")
	}
	return strings.Join(a, ", ")
}

// HelperCallParamList returns source code of call into function f helper.
func (f *Fn) HelperCallParamList() string {
	a := make([]string, 0, len(f.Params))
	for _, p := range f.Params {
		s := p.Name
		if p.Type == "string" {
			s = p.tmpVar()
		}
		a = append(a, s)
	}
	return strings.Join(a, ", ")
}

// IsUTF16 is true if f is a W (utf16) function. It is false
// for all A (ascii) functions.
func (f *Fn) IsUTF16() bool {
	return true
}

// StrconvFunc returns name of Go string to OS string function for f.
func (f *Fn) StrconvFunc() string {
	if f.IsUTF16() {
		return syscalldot() + "UTF16PtrFromString"
	}
	return syscalldot() + "BytePtrFromString"
}

// StrconvType returns Go type name used for OS string for f.
func (f *Fn) StrconvType() string {
	if f.IsUTF16() {
		return "*uint16"
	}
	return "*byte"
}

// HasStringParam is true if f has at least one string parameter.
// Otherwise it is false.
func (f *Fn) HasStringParam() bool {
	for _, p := range f.Params {
		if p.Type == "string" {
			return true
		}
	}
	return false
}

// HelperName returns name of function f helper.
func (f *Fn) HelperName() string {
	if !f.HasStringParam() {
		return f.Name
	}
	return "_" + f.Name
}

// Source files and functions.
type Source struct {
	Funcs           []*Fn
	Files           []string
	StdLibImports   []string
	ExternalImports []string
}

func (src *Source) Import(pkg string) {
	src.StdLibImports = append(src.StdLibImports, pkg)
	sort.Strings(src.StdLibImports)
}

func (src *Source) ExternalImport(pkg string) {
	src.ExternalImports = append(src.ExternalImports, pkg)
	sort.Strings(src.ExternalImports)
}

// ParseFiles parses files listed in fs and extracts all syscall
// functions listed in sys comments. It returns the collected source
// files and functions as a *Source if successful.
func ParseFiles(fs []string) (*Source, error) {
	src := &Source{
		Funcs: make([]*Fn, 0),
		Files: make([]string, 0),
		StdLibImports: []string{
			"unsafe",
		},
		ExternalImports: make([]string, 0),
	}
	for _, file := range fs {
		if err := src.ParseFile(file); err != nil {
			return nil, err
		}
	}
	return src, nil
}

// DLLs returns DLL names for a source set src.
func (src *Source) DLLs() []string {
	uniq := make(map[string]bool)
	r := make([]string, 0)
	for _, f := range src.Funcs {
		name := f.DLLName()
		if _, found := uniq[name]; !found {
			uniq[name] = true
			r = append(r, name)
		}
	}
	return r
}

// ParseFile adds an additional file path to the source set src.
func (src *Source) ParseFile(path string) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()

	s := bufio.NewScanner(file)
	for s.Scan() {
		t := trim(s.Text())
		if len(t) < 7 {
			continue
		}
		if !strings.HasPrefix(t, "//sys") {
			continue
		}
		t = t[5:]
		if !(t[0] == ' ' || t[0] == '\t') {
			continue
		}
		f, err := newFn(t[1:])
		if err != nil {
			return err
		}
		src.Funcs = append(src.Funcs, f)
	}
	if err := s.Err(); err != nil {
		return err
	}
	src.Files = append(src.Files, path)

	// get package name
	fset := token.NewFileSet()
	_, err = file.Seek(0, 0)
	if err != nil {
		return err
	}
	pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
	if err != nil {
		return err
	}
	packageName = pkg.Name.Name

	return nil
}

// IsStdRepo returns true if src is part of the standard library.
func (src *Source) IsStdRepo() (bool, error) {
	if len(src.Files) == 0 {
		return false, errors.New("no input files provided")
	}
	abspath, err := filepath.Abs(src.Files[0])
	if err != nil {
		return false, err
	}
	goroot := runtime.GOROOT()
	if runtime.GOOS == "windows" {
		abspath = strings.ToLower(abspath)
		goroot = strings.ToLower(goroot)
	}
	sep := string(os.PathSeparator)
	if !strings.HasSuffix(goroot, sep) {
		goroot += sep
	}
	return strings.HasPrefix(abspath, goroot), nil
}

// Generate output source file from a source set src.
func (src *Source) Generate(w io.Writer) error {
	const (
		pkgStd         = iota // any package in std library
		pkgXSysWindows        // x/sys/windows package
		pkgOther
	)
	isStdRepo, err := src.IsStdRepo()
	if err != nil {
		return err
	}
	var pkgtype int
	switch {
	case isStdRepo:
		pkgtype = pkgStd
	case packageName == "windows":
		// TODO: this needs better logic than just using package name
		pkgtype = pkgXSysWindows
	default:
		pkgtype = pkgOther
	}
	if *systemDLL {
		switch pkgtype {
		case pkgStd:
			src.Import("internal/syscall/windows/sysdll")
		case pkgXSysWindows:
		default:
			src.ExternalImport("golang.org/x/sys/windows")
		}
	}
	if packageName != "syscall" {
		src.Import("syscall")
	}
	funcMap := template.FuncMap{
		"packagename": packagename,
		"syscalldot":  syscalldot,
		"newlazydll": func(dll string) string {
			arg := "\"" + dll + ".dll\""
			if !*systemDLL {
				return syscalldot() + "NewLazyDLL(" + arg + ")"
			}
			switch pkgtype {
			case pkgStd:
				return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
			case pkgXSysWindows:
				return "NewLazySystemDLL(" + arg + ")"
			default:
				return "windows.NewLazySystemDLL(" + arg + ")"
			}
		},
	}
	t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
	err = t.Execute(w, src)
	if err != nil {
		return errors.New("Failed to execute template: " + err.Error())
	}
	return nil
}

func usage() {
	fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
	flag.PrintDefaults()
	os.Exit(1)
}

func main() {
	flag.Usage = usage
	flag.Parse()
	if len(flag.Args()) <= 0 {
		fmt.Fprintf(os.Stderr, "no files to parse provided\n")
		usage()
	}

	src, err := ParseFiles(flag.Args())
	if err != nil {
		log.Fatal(err)
	}

	var buf bytes.Buffer
	if err := src.Generate(&buf); err != nil {
		log.Fatal(err)
	}

	data, err := format.Source(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	if *filename == "" {
		_, err = os.Stdout.Write(data)
	} else {
		err = ioutil.WriteFile(*filename, data, 0644)
	}
	if err != nil {
		log.Fatal(err)
	}
}

// TODO: use println instead to print in the following template
const srcTemplate = `

{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT

package {{packagename}}

import (
	{{range .StdLibImports}}"{{.}}"
	{{end}}

	{{range .ExternalImports}}"{{.}}"
	{{end}}
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e {{syscalldot}}Errno) error {
	switch e {
	case 0:
		return nil
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values seen on Windows. (perhaps when running
	// all.bat?)
	return e
}

var (
{{template "dlls" .}}
{{template "funcnames" .}})
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
{{end}}

{{/* help functions */}}

{{define "dlls"}}{{range .DLLs}}	mod{{.}} = {{newlazydll .}}
{{end}}{{end}}

{{define "funcnames"}}{{range .Funcs}}	proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}")
{{end}}{{end}}

{{define "helperbody"}}
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
{{template "helpertmpvars" .}}	return {{.HelperName}}({{.HelperCallParamList}})
}
{{end}}

{{define "funcbody"}}
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
{{template "tmpvars" .}}	{{template "syscall" .}}
{{template "seterror" .}}{{template "printtrace" .}}	return
}
{{end}}

{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}}	{{.TmpVarHelperCode}}
{{end}}{{end}}{{end}}

{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}}	{{.TmpVarCode}}
{{end}}{{end}}{{end}}

{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}

{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}

{{define "seterror"}}{{if .Rets.SetErrorCode}}	{{.Rets.SetErrorCode}}
{{end}}{{end}}

{{define "printtrace"}}{{if .PrintTrace}}	print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
{{end}}{{end}}

`
82
vendor/github.com/Microsoft/go-winio/vhd/vhd.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
// +build windows

package vhd

import "syscall"

//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go

//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk

type virtualStorageType struct {
	DeviceID uint32
	VendorID [16]byte
}

const virtualDiskAccessNONE uint32 = 0
const virtualDiskAccessATTACHRO uint32 = 65536
const virtualDiskAccessATTACHRW uint32 = 131072
const virtualDiskAccessDETACH uint32 = 262144
const virtualDiskAccessGETINFO uint32 = 524288
const virtualDiskAccessCREATE uint32 = 1048576
const virtualDiskAccessMETAOPS uint32 = 2097152
const virtualDiskAccessREAD uint32 = 851968
const virtualDiskAccessALL uint32 = 4128768
const virtualDiskAccessWRITABLE uint32 = 3276800

const createVirtualDiskFlagNone uint32 = 0
const createVirtualDiskFlagFullPhysicalAllocation uint32 = 1
const createVirtualDiskFlagPreventWritesToSourceDisk uint32 = 2
const createVirtualDiskFlagDoNotCopyMetadataFromParent uint32 = 4

type version2 struct {
	UniqueID                 [16]byte // GUID
	MaximumSize              uint64
	BlockSizeInBytes         uint32
	SectorSizeInBytes        uint32
	ParentPath               *uint16 // string
	SourcePath               *uint16 // string
	OpenFlags                uint32
	ParentVirtualStorageType virtualStorageType
	SourceVirtualStorageType virtualStorageType
	ResiliencyGUID           [16]byte // GUID
}

type createVirtualDiskParameters struct {
	Version  uint32 // Must always be set to 2
	Version2 version2
}

// CreateVhdx will create a simple vhdx file at the given path using default values.
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
	var defaultType virtualStorageType

	parameters := createVirtualDiskParameters{
		Version: 2,
		Version2: version2{
			MaximumSize:      uint64(maxSizeInGb) * 1024 * 1024 * 1024,
			BlockSizeInBytes: blockSizeInMb * 1024 * 1024,
		},
	}

	var handle syscall.Handle

	if err := createVirtualDisk(
		&defaultType,
		path,
		virtualDiskAccessNONE,
		nil,
		createVirtualDiskFlagNone,
		0,
		&parameters,
		nil,
		&handle); err != nil {
		return err
	}

	if err := syscall.CloseHandle(handle); err != nil {
		return err
	}

	return nil
}
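// Usage (illustrative sketch, not part of the upstream file): create a 10 GB
// VHDX with 1 MB blocks; this only succeeds on Windows hosts that expose the
// VirtDisk API.
//
//	if err := vhd.CreateVhdx(`C:\tmp\disk.vhdx`, 10, 1); err != nil {
//		log.Fatal(err)
//	}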
64
vendor/github.com/Microsoft/go-winio/vhd/zvhd.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT

package vhd

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return nil
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values seen on Windows. (perhaps when running
	// all.bat?)
	return e
}

var (
	modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll")

	procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk")
)

func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(path)
	if err != nil {
		return
	}
	return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle)
}

func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle)))
	if r1 != 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
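// Mapping sketch (editorial note, not part of the upstream file): the //sys
// line in vhd.go declares `(err error) [failretval != 0]`, which surfaces
// above as the `if r1 != 0` check, and its 9 flattened parameters select
// Syscall9 (ParamCount 9 <= 9). The string parameter `path` becomes the
// *uint16 produced by UTF16PtrFromString in the wrapper.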
138
vendor/github.com/Microsoft/go-winio/wim/decompress.go
generated
vendored
Normal file
@@ -0,0 +1,138 @@
package wim

import (
	"encoding/binary"
	"io"
	"io/ioutil"

	"github.com/Microsoft/go-winio/wim/lzx"
)

const chunkSize = 32768 // Compressed resource chunk size

type compressedReader struct {
	r            *io.SectionReader
	d            io.ReadCloser
	chunks       []int64
	curChunk     int
	originalSize int64
}

func newCompressedReader(r *io.SectionReader, originalSize int64, offset int64) (*compressedReader, error) {
	nchunks := (originalSize + chunkSize - 1) / chunkSize
	var base int64
	chunks := make([]int64, nchunks)
	if originalSize <= 0xffffffff {
		// 32-bit chunk offsets
		base = (nchunks - 1) * 4
		chunks32 := make([]uint32, nchunks-1)
		err := binary.Read(r, binary.LittleEndian, chunks32)
		if err != nil {
			return nil, err
		}
		for i, n := range chunks32 {
			chunks[i+1] = int64(n)
		}

	} else {
		// 64-bit chunk offsets
		base = (nchunks - 1) * 8
		err := binary.Read(r, binary.LittleEndian, chunks[1:])
		if err != nil {
			return nil, err
		}
	}

	for i, c := range chunks {
		chunks[i] = c + base
	}

	cr := &compressedReader{
		r:            r,
		chunks:       chunks,
		originalSize: originalSize,
	}

	err := cr.reset(int(offset / chunkSize))
	if err != nil {
		return nil, err
	}

	suboff := offset % chunkSize
	if suboff != 0 {
		_, err := io.CopyN(ioutil.Discard, cr.d, suboff)
		if err != nil {
			return nil, err
		}
	}
	return cr, nil
}
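// Worked example (editorial sketch, not part of the upstream file): a
// resource with originalSize = 100000 has nchunks = 4. In the 32-bit layout
// the table stores nchunks-1 = 3 offsets (chunk 0 starts implicitly at 0), so
// base = 3*4 = 12 bytes, and adding base to every entry shifts the offsets
// past the table into the compressed data itself.
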
func (r *compressedReader) chunkOffset(n int) int64 {
	if n == len(r.chunks) {
		return r.r.Size()
	}
	return r.chunks[n]
}

func (r *compressedReader) chunkSize(n int) int {
	return int(r.chunkOffset(n+1) - r.chunkOffset(n))
}

func (r *compressedReader) uncompressedSize(n int) int {
	if n < len(r.chunks)-1 {
		return chunkSize
	}
	size := int(r.originalSize % chunkSize)
	if size == 0 {
		size = chunkSize
	}
	return size
}

func (r *compressedReader) reset(n int) error {
	if n >= len(r.chunks) {
		return io.EOF
	}
	if r.d != nil {
		r.d.Close()
	}
	r.curChunk = n
	size := r.chunkSize(n)
	uncompressedSize := r.uncompressedSize(n)
	section := io.NewSectionReader(r.r, r.chunkOffset(n), int64(size))
	if size != uncompressedSize {
		d, err := lzx.NewReader(section, uncompressedSize)
		if err != nil {
			return err
		}
		r.d = d
	} else {
		r.d = ioutil.NopCloser(section)
	}

	return nil
}

func (r *compressedReader) Read(b []byte) (int, error) {
	for {
		n, err := r.d.Read(b)
		if err != io.EOF {
			return n, err
		}

		err = r.reset(r.curChunk + 1)
		if err != nil {
			return n, err
		}
	}
}

func (r *compressedReader) Close() error {
	var err error
	if r.d != nil {
		err = r.d.Close()
		r.d = nil
	}
	return err
}
606
vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go
generated
vendored
Normal file
@@ -0,0 +1,606 @@
// Package lzx implements a decompressor for the WIM variant of the
// LZX compression algorithm.
//
// The LZX algorithm is an earlier variant of LZX DELTA, which is documented
// at https://msdn.microsoft.com/en-us/library/cc483133(v=exchg.80).aspx.
package lzx

import (
	"bytes"
	"encoding/binary"
	"errors"
	"io"
)

const (
	maincodecount = 496
	maincodesplit = 256
	lencodecount  = 249
	lenshift      = 9
	codemask      = 0x1ff
	tablebits     = 9
	tablesize     = 1 << tablebits

	maxBlockSize = 32768
	windowSize   = 32768

	maxTreePathLen = 16

	e8filesize  = 12000000
	maxe8offset = 0x3fffffff

	verbatimBlock      = 1
	alignedOffsetBlock = 2
	uncompressedBlock  = 3
)

var footerBits = [...]byte{
	0, 0, 0, 0, 1, 1, 2, 2,
	3, 3, 4, 4, 5, 5, 6, 6,
	7, 7, 8, 8, 9, 9, 10, 10,
	11, 11, 12, 12, 13, 13, 14,
}

var basePosition = [...]uint16{
	0, 1, 2, 3, 4, 6, 8, 12,
	16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072,
	4096, 6144, 8192, 12288, 16384, 24576, 32768,
}

var (
	errCorrupt = errors.New("LZX data corrupt")
)

// Reader is an interface used by the decompressor to access
// the input stream. If the provided io.Reader does not implement
// Reader, then a bufio.Reader is used.
type Reader interface {
	io.Reader
	io.ByteReader
}

type decompressor struct {
	r            io.Reader
	err          error
	unaligned    bool
	nbits        byte
	c            uint32
	lru          [3]uint16
	uncompressed int
	windowReader *bytes.Reader
	mainlens     [maincodecount]byte
	lenlens      [lencodecount]byte
	window       [windowSize]byte
	b            []byte
	bv           int
	bo           int
}

//go:noinline
func (f *decompressor) fail(err error) {
	if f.err == nil {
		f.err = err
	}
	f.bo = 0
	f.bv = 0
}

func (f *decompressor) ensureAtLeast(n int) error {
	if f.bv-f.bo >= n {
		return nil
	}

	if f.err != nil {
		return f.err
	}

	if f.bv != f.bo {
		copy(f.b[:f.bv-f.bo], f.b[f.bo:f.bv])
	}
	n, err := io.ReadAtLeast(f.r, f.b[f.bv-f.bo:], n)
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		} else {
			f.fail(err)
		}
		return err
	}
	f.bv = f.bv - f.bo + n
	f.bo = 0
	return nil
}

// feed retrieves another 16-bit word from the stream and consumes
// it into f.c. It returns false if there are no more bytes available.
// Otherwise, on error, it sets f.err.
func (f *decompressor) feed() bool {
	err := f.ensureAtLeast(2)
	if err != nil {
		if err == io.ErrUnexpectedEOF {
			return false
		}
	}
	f.c |= (uint32(f.b[f.bo+1])<<8 | uint32(f.b[f.bo])) << (16 - f.nbits)
	f.nbits += 16
	f.bo += 2
	return true
}

// getBits retrieves the next n bits from the byte stream. n
// must be <= 16. It sets f.err on error.
func (f *decompressor) getBits(n byte) uint16 {
	if f.nbits < n {
		if !f.feed() {
			f.fail(io.ErrUnexpectedEOF)
		}
	}
	c := uint16(f.c >> (32 - n))
	f.c <<= n
	f.nbits -= n
	return c
}

type huffman struct {
	extra   [][]uint16
	maxbits byte
	table   [tablesize]uint16
}

// buildTable builds a huffman decoding table from a slice of code lengths,
// one per code, in order. Each code length must be <= maxTreePathLen.
// See https://en.wikipedia.org/wiki/Canonical_Huffman_code.
func buildTable(codelens []byte) *huffman {
	// Determine the number of codes of each length, and the
	// maximum length.
	var count [maxTreePathLen + 1]uint
	var max byte
	for _, cl := range codelens {
		count[cl]++
		if max < cl {
			max = cl
		}
	}

	if max == 0 {
		return &huffman{}
	}

	// Determine the first code of each length.
	var first [maxTreePathLen + 1]uint
	code := uint(0)
	for i := byte(1); i <= max; i++ {
		code <<= 1
		first[i] = code
		code += count[i]
	}

	if code != 1<<max {
		return nil
	}

	// Build a table for code lookup. For code sizes < max,
	// put all possible suffixes for the code into the table, too.
	// For max > tablebits, split long codes into additional tables
	// of suffixes of max-tablebits length.
	h := &huffman{maxbits: max}
	if max > tablebits {
		core := first[tablebits+1] / 2 // Number of codes that fit without extra tables
		nextra := 1<<tablebits - core  // Number of extra entries
		h.extra = make([][]uint16, nextra)
		for code := core; code < 1<<tablebits; code++ {
			h.table[code] = uint16(code - core)
			h.extra[code-core] = make([]uint16, 1<<(max-tablebits))
		}
	}

	for i, cl := range codelens {
		if cl != 0 {
			code := first[cl]
			first[cl]++
			v := uint16(cl)<<lenshift | uint16(i)
			if cl <= tablebits {
				extendedCode := code << (tablebits - cl)
				for j := uint(0); j < 1<<(tablebits-cl); j++ {
					h.table[extendedCode+j] = v
				}
			} else {
				prefix := code >> (cl - tablebits)
				suffix := code & (1<<(cl-tablebits) - 1)
				extendedCode := suffix << (max - cl)
				for j := uint(0); j < 1<<(max-cl); j++ {
					h.extra[h.table[prefix]][extendedCode+j] = v
				}
			}
		}
	}

	return h
}
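// Worked example (editorial sketch, not part of the upstream file): for code
// lengths {A: 1, B: 2, C: 2} the canonical assignment is A=0, B=10, C=11; the
// completeness check holds since code == 1<<max == 4 after the first-code
// loop. With max <= tablebits, every 9-bit table slot whose prefix matches a
// code stores length<<lenshift | symbol, so one lookup decodes any code.
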
// getCode retrieves the next code using the provided
// huffman tree. It sets f.err on error.
func (f *decompressor) getCode(h *huffman) uint16 {
	if h.maxbits > 0 {
		if f.nbits < maxTreePathLen {
			f.feed()
		}

		// For codes with length < tablebits, it doesn't matter
		// what the remainder of the bits used for table lookup
		// are, since entries with all possible suffixes were
		// added to the table.
		c := h.table[f.c>>(32-tablebits)]
		if c >= 1<<lenshift {
			// The code is already in c.
		} else {
			c = h.extra[c][f.c<<tablebits>>(32-(h.maxbits-tablebits))]
		}

		n := byte(c >> lenshift)
		if f.nbits >= n {
			// Only consume the length of the code, not the maximum
			// code length.
			f.c <<= n
			f.nbits -= n
			return c & codemask
		}

		f.fail(io.ErrUnexpectedEOF)
		return 0
	}

	// This is an empty tree. It should not be used.
	f.fail(errCorrupt)
	return 0
}

// readTree updates the huffman tree path lengths in lens by
// reading and decoding lengths from the byte stream. lens
// should be prepopulated with the previous block's tree's path
// lengths. For the first block, lens should be zero.
func (f *decompressor) readTree(lens []byte) error {
	// Get the pre-tree for the main tree.
	var pretreeLen [20]byte
	for i := range pretreeLen {
		pretreeLen[i] = byte(f.getBits(4))
	}
	if f.err != nil {
		return f.err
	}
	h := buildTable(pretreeLen[:])

	// The lengths are encoded as a series of huffman codes
	// encoded by the pre-tree.
	for i := 0; i < len(lens); {
		c := byte(f.getCode(h))
		if f.err != nil {
			return f.err
		}
		switch {
		case c <= 16: // length is delta from previous length
			lens[i] = (lens[i] + 17 - c) % 17
			i++
		case c == 17: // next n + 4 lengths are zero
			zeroes := int(f.getBits(4)) + 4
			if i+zeroes > len(lens) {
				return errCorrupt
			}
			for j := 0; j < zeroes; j++ {
				lens[i+j] = 0
			}
			i += zeroes
		case c == 18: // next n + 20 lengths are zero
			zeroes := int(f.getBits(5)) + 20
			if i+zeroes > len(lens) {
				return errCorrupt
			}
			for j := 0; j < zeroes; j++ {
				lens[i+j] = 0
			}
			i += zeroes
		case c == 19: // next n + 4 lengths all have the same value
			same := int(f.getBits(1)) + 4
			if i+same > len(lens) {
				return errCorrupt
			}
			c = byte(f.getCode(h))
			if c > 16 {
				return errCorrupt
			}
			l := (lens[i] + 17 - c) % 17
			for j := 0; j < same; j++ {
				lens[i+j] = l
			}
			i += same
		default:
			return errCorrupt
		}
	}

	if f.err != nil {
		return f.err
	}
	return nil
}
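// Worked example (editorial sketch, not part of the upstream file): if the
// previous block left lens[i] = 5 and the pre-tree yields c = 2, the updated
// length is (5 + 17 - 2) % 17 = 3; codes 0..16 thus encode the delta
// (old - new) mod 17 against the previous tree.
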
func (f *decompressor) readBlockHeader() (byte, uint16, error) {
	// If the previous block was an unaligned uncompressed block, restore
	// 2-byte alignment.
	if f.unaligned {
		err := f.ensureAtLeast(1)
		if err != nil {
			return 0, 0, err
		}
		f.bo++
		f.unaligned = false
	}

	blockType := f.getBits(3)
	full := f.getBits(1)
	var blockSize uint16
	if full != 0 {
		blockSize = maxBlockSize
	} else {
		blockSize = f.getBits(16)
		if blockSize > maxBlockSize {
			return 0, 0, errCorrupt
		}
	}

	if f.err != nil {
		return 0, 0, f.err
	}

	switch blockType {
	case verbatimBlock, alignedOffsetBlock:
		// The caller will read the huffman trees.
	case uncompressedBlock:
		if f.nbits > 16 {
			panic("impossible: more than one 16-bit word remains")
		}

		// Drop the remaining bits in the current 16-bit word.
		// If there are no bits left, discard a full 16-bit word.
		n := f.nbits
		if n == 0 {
			n = 16
		}

		f.getBits(n)

		// Read the LRU values for the next block.
		err := f.ensureAtLeast(12)
		if err != nil {
			return 0, 0, err
		}

		f.lru[0] = uint16(binary.LittleEndian.Uint32(f.b[f.bo : f.bo+4]))
		f.lru[1] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+4 : f.bo+8]))
		f.lru[2] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+8 : f.bo+12]))
		f.bo += 12

	default:
		return 0, 0, errCorrupt
	}

	return byte(blockType), blockSize, nil
}

// readTrees reads the two or three huffman trees for the current block.
// readAligned specifies whether to read the aligned offset tree.
func (f *decompressor) readTrees(readAligned bool) (main *huffman, length *huffman, aligned *huffman, err error) {
	// Aligned offset blocks start with a small aligned offset tree.
	if readAligned {
		var alignedLen [8]byte
		for i := range alignedLen {
			alignedLen[i] = byte(f.getBits(3))
		}
		aligned = buildTable(alignedLen[:])
		if aligned == nil {
			err = errors.New("corrupt")
			return
		}
	}

	// The main tree is encoded in two parts.
	err = f.readTree(f.mainlens[:maincodesplit])
	if err != nil {
		return
	}
	err = f.readTree(f.mainlens[maincodesplit:])
	if err != nil {
		return
	}

	main = buildTable(f.mainlens[:])
	if main == nil {
		err = errors.New("corrupt")
		return
	}

	// The length tree is encoded in a single part.
	err = f.readTree(f.lenlens[:])
	if err != nil {
		return
	}

	length = buildTable(f.lenlens[:])
	if length == nil {
		err = errors.New("corrupt")
		return
	}

	err = f.err
	return
}

// readCompressedBlock decodes a compressed block, writing into the window
// starting at start and ending at end, and using the provided huffman trees.
func (f *decompressor) readCompressedBlock(start, end uint16, hmain, hlength, haligned *huffman) (int, error) {
	i := start
	for i < end {
		main := f.getCode(hmain)
		if f.err != nil {
			break
		}
		if main < 256 {
			// Literal byte.
			f.window[i] = byte(main)
			i++
			continue
		}

		// This is a match backward in the window. Determine
		// the offset and length.
		matchlen := (main - 256) % 8
		slot := (main - 256) / 8

		// The length is either the low bits of the code,
		// or if this is 7, is encoded with the length tree.
		if matchlen == 7 {
			matchlen += f.getCode(hlength)
		}
		matchlen += 2

		var matchoffset uint16
		if slot < 3 {
			// The offset is one of the LRU values.
			matchoffset = f.lru[slot]
			f.lru[slot] = f.lru[0]
			f.lru[0] = matchoffset
		} else {
			// The offset is encoded as a combination of the
			// slot and more bits from the bit stream.
			offsetbits := footerBits[slot]
			var verbatimbits, alignedbits uint16
			if offsetbits > 0 {
				if haligned != nil && offsetbits >= 3 {
					// This is an aligned offset block. Combine
					// the bits written verbatim with the aligned
					// offset tree code.
					verbatimbits = f.getBits(offsetbits-3) * 8
					alignedbits = f.getCode(haligned)
				} else {
					// There are no aligned offset bits to read,
					// only verbatim bits.
					verbatimbits = f.getBits(offsetbits)
					alignedbits = 0
				}
			}
			matchoffset = basePosition[slot] + verbatimbits + alignedbits - 2
			// Update the LRU cache.
			f.lru[2] = f.lru[1]
			f.lru[1] = f.lru[0]
			f.lru[0] = matchoffset
		}

		if matchoffset <= i && matchlen <= end-i {
			copyend := i + matchlen
			for ; i < copyend; i++ {
				f.window[i] = f.window[i-matchoffset]
			}
		} else {
			f.fail(errCorrupt)
			break
		}
	}
	return int(i - start), f.err
}

// readBlock decodes the current block and returns the number of uncompressed bytes.
func (f *decompressor) readBlock(start uint16) (int, error) {
	blockType, size, err := f.readBlockHeader()
	if err != nil {
		return 0, err
	}

	if blockType == uncompressedBlock {
		if size%2 == 1 {
			// Remember to realign the byte stream at the next block.
			f.unaligned = true
		}
		copied := 0
		if f.bo < f.bv {
			copied = int(size)
			s := int(start)
			if copied > f.bv-f.bo {
				copied = f.bv - f.bo
			}
			copy(f.window[s:s+copied], f.b[f.bo:f.bo+copied])
			f.bo += copied
		}
		n, err := io.ReadFull(f.r, f.window[start+uint16(copied):start+size])
		return copied + n, err
	}

	hmain, hlength, haligned, err := f.readTrees(blockType == alignedOffsetBlock)
	if err != nil {
		return 0, err
	}

	return f.readCompressedBlock(start, start+size, hmain, hlength, haligned)
}

// decodeE8 reverses the 0xe8 x86 instruction encoding that was performed
// on the uncompressed data before it was compressed.
func decodeE8(b []byte, off int64) {
	if off > maxe8offset || len(b) < 10 {
		return
	}
	for i := 0; i < len(b)-10; i++ {
		if b[i] == 0xe8 {
			currentPtr := int32(off) + int32(i)
			abs := int32(binary.LittleEndian.Uint32(b[i+1 : i+5]))
			if abs >= -currentPtr && abs < e8filesize {
				var rel int32
				if abs >= 0 {
					rel = abs - currentPtr
				} else {
					rel = abs + e8filesize
				}
				binary.LittleEndian.PutUint32(b[i+1:i+5], uint32(rel))
			}
			i += 4
		}
	}
}
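// Worked example (editorial sketch, not part of the upstream file): if a CALL
// opcode 0xe8 sits at window offset 100 and the stored operand is the absolute
// target 4196 (>= -currentPtr and < e8filesize), then currentPtr = 100 and the
// restored relative displacement is rel = 4196 - 100 = 4096, the value the
// instruction held before compression-side preprocessing.
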
func (f *decompressor) Read(b []byte) (int, error) {
	// Read and uncompress everything.
	if f.windowReader == nil {
		n := 0
		for n < f.uncompressed {
			k, err := f.readBlock(uint16(n))
			if err != nil {
				return 0, err
			}
			n += k
		}
		decodeE8(f.window[:f.uncompressed], 0)
		f.windowReader = bytes.NewReader(f.window[:f.uncompressed])
	}

	// Just read directly from the window.
	return f.windowReader.Read(b)
}

func (f *decompressor) Close() error {
	return nil
}

// NewReader returns a new io.ReadCloser that decompresses a
// WIM LZX stream until uncompressedSize bytes have been returned.
func NewReader(r io.Reader, uncompressedSize int) (io.ReadCloser, error) {
	if uncompressedSize > windowSize {
		return nil, errors.New("uncompressed size is limited to 32KB")
	}
	f := &decompressor{
		lru:          [3]uint16{1, 1, 1},
		uncompressed: uncompressedSize,
		b:            make([]byte, 4096),
		r:            r,
	}
	return f, nil
}
51
vendor/github.com/Microsoft/go-winio/wim/validate/validate.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/Microsoft/go-winio/wim"
)

func main() {
	flag.Parse()
	f, err := os.Open(flag.Arg(0))
	if err != nil {
		panic(err)
	}

	w, err := wim.NewReader(f)
	if err != nil {
		panic(err)
	}

	fmt.Printf("%#v\n%#v\n", w.Image[0], w.Image[0].Windows)

	dir, err := w.Image[0].Open()
	if err != nil {
		panic(err)
	}

	err = recur(dir)
	if err != nil {
		panic(err)
	}
}

func recur(d *wim.File) error {
	files, err := d.Readdir()
	if err != nil {
		return fmt.Errorf("%s: %s", d.Name, err)
	}
	for _, f := range files {
		if f.IsDir() {
			err = recur(f)
			if err != nil {
				return fmt.Errorf("%s: %s", f.Name, err)
			}
		}
	}
	return nil
}
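// Usage (editorial sketch, not part of the upstream file): point the tool at
// a WIM file to walk every directory of its first image and surface parse
// errors as panics, e.g.
//
//	go run validate.go install.wim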
866
vendor/github.com/Microsoft/go-winio/wim/wim.go
generated
vendored
Normal file
@@ -0,0 +1,866 @@
|
||||
// Package wim implements a WIM file parser.
|
||||
//
|
||||
// WIM files are used to distribute Windows file system and container images.
|
||||
// They are documented at https://msdn.microsoft.com/en-us/library/windows/desktop/dd861280.aspx.
|
||||
package wim
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/binary"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf16"
|
||||
)
|
||||
|
||||
// File attribute constants from Windows.
|
||||
const (
|
||||
FILE_ATTRIBUTE_READONLY = 0x00000001
|
||||
FILE_ATTRIBUTE_HIDDEN = 0x00000002
|
||||
FILE_ATTRIBUTE_SYSTEM = 0x00000004
|
||||
FILE_ATTRIBUTE_DIRECTORY = 0x00000010
|
||||
FILE_ATTRIBUTE_ARCHIVE = 0x00000020
|
||||
FILE_ATTRIBUTE_DEVICE = 0x00000040
|
||||
FILE_ATTRIBUTE_NORMAL = 0x00000080
|
||||
FILE_ATTRIBUTE_TEMPORARY = 0x00000100
|
||||
FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200
|
||||
FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
|
||||
FILE_ATTRIBUTE_COMPRESSED = 0x00000800
|
||||
FILE_ATTRIBUTE_OFFLINE = 0x00001000
|
||||
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000
|
||||
FILE_ATTRIBUTE_ENCRYPTED = 0x00004000
|
||||
FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000
|
||||
FILE_ATTRIBUTE_VIRTUAL = 0x00010000
|
||||
FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000
|
||||
FILE_ATTRIBUTE_EA = 0x00040000
|
||||
)
|
||||
|
||||
// Windows processor architectures.
|
||||
const (
|
||||
PROCESSOR_ARCHITECTURE_INTEL = 0
|
||||
PROCESSOR_ARCHITECTURE_MIPS = 1
|
||||
PROCESSOR_ARCHITECTURE_ALPHA = 2
|
||||
PROCESSOR_ARCHITECTURE_PPC = 3
|
||||
PROCESSOR_ARCHITECTURE_SHX = 4
|
||||
PROCESSOR_ARCHITECTURE_ARM = 5
|
||||
PROCESSOR_ARCHITECTURE_IA64 = 6
|
||||
PROCESSOR_ARCHITECTURE_ALPHA64 = 7
|
||||
PROCESSOR_ARCHITECTURE_MSIL = 8
|
||||
PROCESSOR_ARCHITECTURE_AMD64 = 9
|
||||
PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10
|
||||
PROCESSOR_ARCHITECTURE_NEUTRAL = 11
|
||||
PROCESSOR_ARCHITECTURE_ARM64 = 12
|
||||
)
|
||||
|
||||
var wimImageTag = [...]byte{'M', 'S', 'W', 'I', 'M', 0, 0, 0}
|
||||
|
||||
type guid struct {
|
||||
Data1 uint32
|
||||
Data2 uint16
|
||||
Data3 uint16
|
||||
Data4 [8]byte
|
||||
}
|
||||
|
||||
func (g guid) String() string {
|
||||
return fmt.Sprintf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", g.Data1, g.Data2, g.Data3, g.Data4[0], g.Data4[1], g.Data4[2], g.Data4[3], g.Data4[4], g.Data4[5], g.Data4[6], g.Data4[7])
|
||||
}
|
||||
|
||||
type resourceDescriptor struct {
|
||||
FlagsAndCompressedSize uint64
|
||||
Offset int64
|
||||
OriginalSize int64
|
||||
}
|
||||
|
||||
type resFlag byte
|
||||
|
||||
const (
|
||||
resFlagFree resFlag = 1 << iota
|
||||
resFlagMetadata
|
||||
resFlagCompressed
|
||||
resFlagSpanned
|
||||
)
|
||||
|
||||
const validate = false
|
||||
|
||||
const supportedResFlags = resFlagMetadata | resFlagCompressed
|
||||
|
||||
func (r *resourceDescriptor) Flags() resFlag {
|
||||
return resFlag(r.FlagsAndCompressedSize >> 56)
|
||||
}
|
||||
|
||||
func (r *resourceDescriptor) CompressedSize() int64 {
|
||||
return int64(r.FlagsAndCompressedSize & 0xffffffffffffff)
|
||||
}
|
||||
|
||||
func (r *resourceDescriptor) String() string {
|
||||
s := fmt.Sprintf("%d bytes at %d", r.CompressedSize(), r.Offset)
|
||||
if r.Flags()&4 != 0 {
|
||||
s += fmt.Sprintf(" (uncompresses to %d)", r.OriginalSize)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// SHA1Hash contains the SHA1 hash of a file or stream.
|
||||
type SHA1Hash [20]byte
|
||||
|
||||
type streamDescriptor struct {
|
||||
resourceDescriptor
|
||||
PartNumber uint16
|
||||
RefCount uint32
|
||||
Hash SHA1Hash
|
||||
}
|
||||
|
||||
type hdrFlag uint32
|
||||
|
||||
const (
|
||||
hdrFlagReserved hdrFlag = 1 << iota
|
||||
hdrFlagCompressed
|
||||
hdrFlagReadOnly
|
||||
hdrFlagSpanned
|
||||
hdrFlagResourceOnly
|
||||
hdrFlagMetadataOnly
|
||||
hdrFlagWriteInProgress
|
||||
hdrFlagRpFix
|
||||
)
|
||||
|
||||
const (
|
||||
hdrFlagCompressReserved hdrFlag = 1 << (iota + 16)
|
||||
hdrFlagCompressXpress
|
||||
hdrFlagCompressLzx
|
||||
)
|
||||
|
||||
const supportedHdrFlags = hdrFlagRpFix | hdrFlagReadOnly | hdrFlagCompressed | hdrFlagCompressLzx

type wimHeader struct {
	ImageTag        [8]byte
	Size            uint32
	Version         uint32
	Flags           hdrFlag
	CompressionSize uint32
	WIMGuid         guid
	PartNumber      uint16
	TotalParts      uint16
	ImageCount      uint32
	OffsetTable     resourceDescriptor
	XMLData         resourceDescriptor
	BootMetadata    resourceDescriptor
	BootIndex       uint32
	Padding         uint32
	Integrity       resourceDescriptor
	Unused          [60]byte
}

type securityblockDisk struct {
	TotalLength uint32
	NumEntries  uint32
}

const securityblockDiskSize = 8

type direntry struct {
	Attributes       uint32
	SecurityID       uint32
	SubdirOffset     int64
	Unused1, Unused2 int64
	CreationTime     Filetime
	LastAccessTime   Filetime
	LastWriteTime    Filetime
	Hash             SHA1Hash
	Padding          uint32
	ReparseHardLink  int64
	StreamCount      uint16
	ShortNameLength  uint16
	FileNameLength   uint16
}

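// With the packed field sizes above, binary.Size(direntry{}) is 94 bytes,
// so direntrySize works out to 102.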
var direntrySize = int64(binary.Size(direntry{}) + 8) // includes an 8-byte length prefix

type streamentry struct {
	Unused     int64
	Hash       SHA1Hash
	NameLength int16
}

var streamentrySize = int64(binary.Size(streamentry{}) + 8) // includes an 8-byte length prefix

// Filetime represents a Windows time.
type Filetime struct {
	LowDateTime  uint32
	HighDateTime uint32
}

// Time returns the time as time.Time.
func (ft *Filetime) Time() time.Time {
	// 100-nanosecond intervals since January 1, 1601
	nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)
	// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)
	nsec -= 116444736000000000
	// convert into nanoseconds
	nsec *= 100
	return time.Unix(0, nsec)
}

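// For example, Filetime{LowDateTime: 0xd53e8000, HighDateTime: 0x019db1de}
// encodes exactly 116444736000000000 hundred-nanosecond intervals since 1601,
// so Time() returns 1970-01-01T00:00:00 UTC.
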
// UnmarshalXML unmarshals the time from a WIM XML blob.
func (ft *Filetime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	type time struct {
		Low  string `xml:"LOWPART"`
		High string `xml:"HIGHPART"`
	}
	var t time
	err := d.DecodeElement(&t, &start)
	if err != nil {
		return err
	}

	low, err := strconv.ParseUint(t.Low, 0, 32)
	if err != nil {
		return err
	}
	high, err := strconv.ParseUint(t.High, 0, 32)
	if err != nil {
		return err
	}

	ft.LowDateTime = uint32(low)
	ft.HighDateTime = uint32(high)
	return nil
}

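// The element decoded above has the following shape (the part values here
// are hypothetical):
//
//	<CREATIONTIME>
//		<HIGHPART>0x01D2CB1E</HIGHPART>
//		<LOWPART>0x5F1382A1</LOWPART>
//	</CREATIONTIME>
//
// strconv.ParseUint is called with base 0, so the 0x prefix is honored.
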
type info struct {
	Image []ImageInfo `xml:"IMAGE"`
}

// ImageInfo contains information about the image.
type ImageInfo struct {
	Name         string       `xml:"NAME"`
	Index        int          `xml:"INDEX,attr"`
	CreationTime Filetime     `xml:"CREATIONTIME"`
	ModTime      Filetime     `xml:"LASTMODIFICATIONTIME"`
	Windows      *WindowsInfo `xml:"WINDOWS"`
}

// WindowsInfo contains information about the Windows installation in the image.
type WindowsInfo struct {
	Arch             byte     `xml:"ARCH"`
	ProductName      string   `xml:"PRODUCTNAME"`
	EditionID        string   `xml:"EDITIONID"`
	InstallationType string   `xml:"INSTALLATIONTYPE"`
	ProductType      string   `xml:"PRODUCTTYPE"`
	Languages        []string `xml:"LANGUAGES>LANGUAGE"`
	DefaultLanguage  string   `xml:"LANGUAGES>DEFAULT"`
	Version          Version  `xml:"VERSION"`
	SystemRoot       string   `xml:"SYSTEMROOT"`
}

// Version represents a Windows build version.
type Version struct {
	Major   int `xml:"MAJOR"`
	Minor   int `xml:"MINOR"`
	Build   int `xml:"BUILD"`
	SPBuild int `xml:"SPBUILD"`
	SPLevel int `xml:"SPLEVEL"`
}

// ParseError is returned when the WIM cannot be parsed.
type ParseError struct {
	Oper string
	Path string
	Err  error
}

func (e *ParseError) Error() string {
	if e.Path == "" {
		return "WIM parse error at " + e.Oper + ": " + e.Err.Error()
	}
	return fmt.Sprintf("WIM parse error: %s %s: %s", e.Oper, e.Path, e.Err.Error())
}

// Reader provides functions to read a WIM file.
type Reader struct {
	hdr      wimHeader
	r        io.ReaderAt
	fileData map[SHA1Hash]resourceDescriptor

	XMLInfo string   // The XML information about the WIM.
	Image   []*Image // The WIM's images.
}

// Image represents an image within a WIM file.
type Image struct {
	wim        *Reader
	offset     resourceDescriptor
	sds        [][]byte
	rootOffset int64
	r          io.ReadCloser
	curOffset  int64
	m          sync.Mutex

	ImageInfo
}

// StreamHeader contains alternate data stream metadata.
type StreamHeader struct {
	Name string
	Hash SHA1Hash
	Size int64
}

// Stream represents an alternate data stream or reparse point data stream.
type Stream struct {
	StreamHeader
	wim    *Reader
	offset resourceDescriptor
}

// FileHeader contains file metadata.
type FileHeader struct {
	Name               string
	ShortName          string
	Attributes         uint32
	SecurityDescriptor []byte
	CreationTime       Filetime
	LastAccessTime     Filetime
	LastWriteTime      Filetime
	Hash               SHA1Hash
	Size               int64
	LinkID             int64
	ReparseTag         uint32
	ReparseReserved    uint32
}

// File represents a file or directory in a WIM image.
type File struct {
	FileHeader
	Streams      []*Stream
	offset       resourceDescriptor
	img          *Image
	subdirOffset int64
}

// NewReader returns a Reader that can be used to read WIM file data.
func NewReader(f io.ReaderAt) (*Reader, error) {
	r := &Reader{r: f}
	section := io.NewSectionReader(f, 0, 0xffff)
	err := binary.Read(section, binary.LittleEndian, &r.hdr)
	if err != nil {
		return nil, err
	}

	if r.hdr.ImageTag != wimImageTag {
		return nil, &ParseError{Oper: "image tag", Err: errors.New("not a WIM file")}
	}

	if r.hdr.Flags&^supportedHdrFlags != 0 {
		return nil, fmt.Errorf("unsupported WIM flags %x", r.hdr.Flags&^supportedHdrFlags)
	}

	if r.hdr.CompressionSize != 0x8000 {
		return nil, fmt.Errorf("unsupported compression size %d", r.hdr.CompressionSize)
	}

	if r.hdr.TotalParts != 1 {
		return nil, errors.New("multi-part WIM not supported")
	}

	fileData, images, err := r.readOffsetTable(&r.hdr.OffsetTable)
	if err != nil {
		return nil, err
	}

	xmlinfo, err := r.readXML()
	if err != nil {
		return nil, err
	}

	var info info
	err = xml.Unmarshal([]byte(xmlinfo), &info)
	if err != nil {
		return nil, &ParseError{Oper: "XML info", Err: err}
	}

	for i, img := range images {
		for _, imgInfo := range info.Image {
			if imgInfo.Index == i+1 {
				img.ImageInfo = imgInfo
				break
			}
		}
	}

	r.fileData = fileData
	r.Image = images
	r.XMLInfo = xmlinfo
	return r, nil
}

// Close releases resources associated with the Reader.
func (r *Reader) Close() error {
	for _, img := range r.Image {
		img.reset()
	}
	return nil
}

func (r *Reader) resourceReader(hdr *resourceDescriptor) (io.ReadCloser, error) {
	return r.resourceReaderWithOffset(hdr, 0)
}

func (r *Reader) resourceReaderWithOffset(hdr *resourceDescriptor, offset int64) (io.ReadCloser, error) {
	var sr io.ReadCloser
	section := io.NewSectionReader(r.r, hdr.Offset, hdr.CompressedSize())
	if hdr.Flags()&resFlagCompressed == 0 {
		section.Seek(offset, 0)
		sr = ioutil.NopCloser(section)
	} else {
		cr, err := newCompressedReader(section, hdr.OriginalSize, offset)
		if err != nil {
			return nil, err
		}
		sr = cr
	}

	return sr, nil
}

func (r *Reader) readResource(hdr *resourceDescriptor) ([]byte, error) {
	rsrc, err := r.resourceReader(hdr)
	if err != nil {
		return nil, err
	}
	defer rsrc.Close()
	return ioutil.ReadAll(rsrc)
}

func (r *Reader) readXML() (string, error) {
	if r.hdr.XMLData.CompressedSize() == 0 {
		return "", nil
	}
	rsrc, err := r.resourceReader(&r.hdr.XMLData)
	if err != nil {
		return "", err
	}
	defer rsrc.Close()

	XMLData := make([]uint16, r.hdr.XMLData.OriginalSize/2)
	err = binary.Read(rsrc, binary.LittleEndian, XMLData)
	if err != nil {
		return "", &ParseError{Oper: "XML data", Err: err}
	}

	// The BOM will always indicate little-endian UTF-16.
	if XMLData[0] != 0xfeff {
		return "", &ParseError{Oper: "XML data", Err: errors.New("invalid BOM")}
	}
	return string(utf16.Decode(XMLData[1:])), nil
}

func (r *Reader) readOffsetTable(res *resourceDescriptor) (map[SHA1Hash]resourceDescriptor, []*Image, error) {
	fileData := make(map[SHA1Hash]resourceDescriptor)
	var images []*Image

	offsetTable, err := r.readResource(res)
	if err != nil {
		return nil, nil, &ParseError{Oper: "offset table", Err: err}
	}

	br := bytes.NewReader(offsetTable)
	for i := 0; ; i++ {
		var res streamDescriptor
		err := binary.Read(br, binary.LittleEndian, &res)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, nil, &ParseError{Oper: "offset table", Err: err}
		}
		if res.Flags()&^supportedResFlags != 0 {
			return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("unsupported resource flag")}
		}

		// Validation for ad-hoc testing
		if validate {
			sec, err := r.resourceReader(&res.resourceDescriptor)
			if err != nil {
				panic(fmt.Sprint(i, err))
			}
			hash := sha1.New()
			_, err = io.Copy(hash, sec)
			sec.Close()
			if err != nil {
				panic(fmt.Sprint(i, err))
			}
			var cmphash SHA1Hash
			copy(cmphash[:], hash.Sum(nil))
			if cmphash != res.Hash {
				panic(fmt.Sprint(i, "hash mismatch"))
			}
		}

		if res.Flags()&resFlagMetadata != 0 {
			image := &Image{
				wim:    r,
				offset: res.resourceDescriptor,
			}
			images = append(images, image)
		} else {
			fileData[res.Hash] = res.resourceDescriptor
		}
	}

	if len(images) != int(r.hdr.ImageCount) {
		return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("mismatched image count")}
	}

	return fileData, images, nil
}

func (r *Reader) readSecurityDescriptors(rsrc io.Reader) (sds [][]byte, n int64, err error) {
	var secBlock securityblockDisk
	err = binary.Read(rsrc, binary.LittleEndian, &secBlock)
	if err != nil {
		err = &ParseError{Oper: "security table", Err: err}
		return
	}

	n += securityblockDiskSize

	secSizes := make([]int64, secBlock.NumEntries)
	err = binary.Read(rsrc, binary.LittleEndian, &secSizes)
	if err != nil {
		err = &ParseError{Oper: "security table sizes", Err: err}
		return
	}

	n += int64(secBlock.NumEntries * 8)

	sds = make([][]byte, secBlock.NumEntries)
	for i, size := range secSizes {
		sd := make([]byte, size&0xffffffff)
		_, err = io.ReadFull(rsrc, sd)
		if err != nil {
			err = &ParseError{Oper: "security descriptor", Err: err}
			return
		}
		n += int64(len(sd))
		sds[i] = sd
	}

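	// (TotalLength + 7) &^ 7 rounds the security table length up to the
	// next multiple of 8, since the block is 8-byte aligned on disk.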
	secsize := int64((secBlock.TotalLength + 7) &^ 7)
	if n > secsize {
		err = &ParseError{Oper: "security descriptor", Err: errors.New("security descriptor table too small")}
		return
	}

	_, err = io.CopyN(ioutil.Discard, rsrc, secsize-n)
	if err != nil {
		return
	}

	n = secsize
	return
}

// Open parses the image and returns the root directory.
func (img *Image) Open() (*File, error) {
	if img.sds == nil {
		rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, img.rootOffset)
		if err != nil {
			return nil, err
		}
		sds, n, err := img.wim.readSecurityDescriptors(rsrc)
		if err != nil {
			rsrc.Close()
			return nil, err
		}
		img.sds = sds
		img.r = rsrc
		img.rootOffset = n
		img.curOffset = n
	}

	f, err := img.readdir(img.rootOffset)
	if err != nil {
		return nil, err
	}
	if len(f) != 1 {
		return nil, &ParseError{Oper: "root directory", Err: errors.New("expected exactly 1 root directory entry")}
	}
	return f[0], err
}

func (img *Image) reset() {
	if img.r != nil {
		img.r.Close()
		img.r = nil
	}
	img.curOffset = -1
}

func (img *Image) readdir(offset int64) ([]*File, error) {
	img.m.Lock()
	defer img.m.Unlock()

	if offset < img.curOffset || offset > img.curOffset+chunkSize {
		// Reset to seek backward or to seek forward very far.
		img.reset()
	}
	if img.r == nil {
		rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, offset)
		if err != nil {
			return nil, err
		}
		img.r = rsrc
		img.curOffset = offset
	}
	if offset > img.curOffset {
		_, err := io.CopyN(ioutil.Discard, img.r, offset-img.curOffset)
		if err != nil {
			img.reset()
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			return nil, err
		}
	}

	var entries []*File
	for {
		e, n, err := img.readNextEntry(img.r)
		img.curOffset += n
		if err == io.EOF {
			break
		}
		if err != nil {
			img.reset()
			return nil, err
		}
		entries = append(entries, e)
	}
	return entries, nil
}

func (img *Image) readNextEntry(r io.Reader) (*File, int64, error) {
	var length int64
	err := binary.Read(r, binary.LittleEndian, &length)
	if err != nil {
		return nil, 0, &ParseError{Oper: "directory length check", Err: err}
	}

	if length == 0 {
		return nil, 8, io.EOF
	}

	left := length
	if left < direntrySize {
		return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short")}
	}

	var dentry direntry
	err = binary.Read(r, binary.LittleEndian, &dentry)
	if err != nil {
		return nil, 0, &ParseError{Oper: "directory entry", Err: err}
	}

	left -= direntrySize

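	// The file name is followed by a 2-byte null separator before the
	// short name, hence the extra 2 bytes below.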
	namesLen := int64(dentry.FileNameLength + 2 + dentry.ShortNameLength)
	if left < namesLen {
		return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short for names")}
	}

	names := make([]uint16, namesLen/2)
	err = binary.Read(r, binary.LittleEndian, names)
	if err != nil {
		return nil, 0, &ParseError{Oper: "file name", Err: err}
	}

	left -= namesLen

	var name, shortName string
	if dentry.FileNameLength > 0 {
		name = string(utf16.Decode(names[:dentry.FileNameLength/2]))
	}

	if dentry.ShortNameLength > 0 {
		shortName = string(utf16.Decode(names[dentry.FileNameLength/2+1:]))
	}

	var offset resourceDescriptor
	zerohash := SHA1Hash{}
	if dentry.Hash != zerohash {
		var ok bool
		offset, ok = img.wim.fileData[dentry.Hash]
		if !ok {
			return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %#v", dentry)}
		}
	}

	f := &File{
		FileHeader: FileHeader{
			Attributes:     dentry.Attributes,
			CreationTime:   dentry.CreationTime,
			LastAccessTime: dentry.LastAccessTime,
			LastWriteTime:  dentry.LastWriteTime,
			Hash:           dentry.Hash,
			Size:           offset.OriginalSize,
			Name:           name,
			ShortName:      shortName,
		},

		offset:       offset,
		img:          img,
		subdirOffset: dentry.SubdirOffset,
	}

	isDir := false

	if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT == 0 {
		f.LinkID = dentry.ReparseHardLink
		if dentry.Attributes&FILE_ATTRIBUTE_DIRECTORY != 0 {
			isDir = true
		}
	} else {
		f.ReparseTag = uint32(dentry.ReparseHardLink)
		f.ReparseReserved = uint32(dentry.ReparseHardLink >> 32)
	}

	if isDir && f.subdirOffset == 0 {
		return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("no subdirectory data for directory")}
	} else if !isDir && f.subdirOffset != 0 {
		return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("unexpected subdirectory data for non-directory")}
	}

	if dentry.SecurityID != 0xffffffff {
		f.SecurityDescriptor = img.sds[dentry.SecurityID]
	}

	_, err = io.CopyN(ioutil.Discard, r, left)
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, 0, err
	}

	if dentry.StreamCount > 0 {
		var streams []*Stream
		for i := uint16(0); i < dentry.StreamCount; i++ {
			s, n, err := img.readNextStream(r)
			length += n
			if err != nil {
				return nil, 0, err
			}
			// The first unnamed stream should be treated as the file stream.
			if i == 0 && s.Name == "" {
				f.Hash = s.Hash
				f.Size = s.Size
				f.offset = s.offset
			} else if s.Name != "" {
				streams = append(streams, s)
			}
		}
		f.Streams = streams
	}

	if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT != 0 && f.Size == 0 {
		return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("reparse point is missing reparse stream")}
	}

	return f, length, nil
}

func (img *Image) readNextStream(r io.Reader) (*Stream, int64, error) {
	var length int64
	err := binary.Read(r, binary.LittleEndian, &length)
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, 0, &ParseError{Oper: "stream length check", Err: err}
	}

	left := length
	if left < streamentrySize {
		return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short")}
	}

	var sentry streamentry
	err = binary.Read(r, binary.LittleEndian, &sentry)
	if err != nil {
		return nil, 0, &ParseError{Oper: "stream entry", Err: err}
	}

	left -= streamentrySize

	if left < int64(sentry.NameLength) {
		return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short for name")}
	}

	names := make([]uint16, sentry.NameLength/2)
	err = binary.Read(r, binary.LittleEndian, names)
	if err != nil {
		return nil, 0, &ParseError{Oper: "file name", Err: err}
	}

	left -= int64(sentry.NameLength)
	name := string(utf16.Decode(names))

	var offset resourceDescriptor
	if sentry.Hash != (SHA1Hash{}) {
		var ok bool
		offset, ok = img.wim.fileData[sentry.Hash]
		if !ok {
			return nil, 0, &ParseError{Oper: "stream entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %v", sentry.Hash)}
		}
	}

	s := &Stream{
		StreamHeader: StreamHeader{
			Hash: sentry.Hash,
			Size: offset.OriginalSize,
			Name: name,
		},
		wim:    img.wim,
		offset: offset,
	}

	_, err = io.CopyN(ioutil.Discard, r, left)
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, 0, err
	}

	return s, length, nil
}

// Open returns an io.ReadCloser that can be used to read the stream's contents.
func (s *Stream) Open() (io.ReadCloser, error) {
	return s.wim.resourceReader(&s.offset)
}

// Open returns an io.ReadCloser that can be used to read the file's contents.
func (f *File) Open() (io.ReadCloser, error) {
	return f.img.wim.resourceReader(&f.offset)
}

// Readdir reads the directory entries.
func (f *File) Readdir() ([]*File, error) {
	if !f.IsDir() {
		return nil, errors.New("not a directory")
	}
	return f.img.readdir(f.subdirOffset)
}

// IsDir returns whether the given file is a directory. It returns false when it
// is a directory reparse point.
func (f *FileHeader) IsDir() bool {
	return f.Attributes&(FILE_ATTRIBUTE_DIRECTORY|FILE_ATTRIBUTE_REPARSE_POINT) == FILE_ATTRIBUTE_DIRECTORY
}

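A minimal usage sketch of the reader above, assuming the upstream import path
github.com/Microsoft/go-winio/wim and a local file named install.wim:

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/Microsoft/go-winio/wim"
	)

	func main() {
		f, err := os.Open("install.wim") // *os.File implements io.ReaderAt
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		r, err := wim.NewReader(f)
		if err != nil {
			log.Fatal(err)
		}
		defer r.Close()

		// List the root directory of the first image.
		root, err := r.Image[0].Open()
		if err != nil {
			log.Fatal(err)
		}
		files, err := root.Readdir()
		if err != nil {
			log.Fatal(err)
		}
		for _, file := range files {
			fmt.Println(file.Name, file.IsDir())
		}
	}
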
8
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
@@ -38,14 +38,12 @@ func errnoErr(e syscall.Errno) error {

var (
	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
	modwinmm    = windows.NewLazySystemDLL("winmm.dll")
	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")

	procCancelIoEx                         = modkernel32.NewProc("CancelIoEx")
	procCreateIoCompletionPort             = modkernel32.NewProc("CreateIoCompletionPort")
	procGetQueuedCompletionStatus          = modkernel32.NewProc("GetQueuedCompletionStatus")
	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
	proctimeBeginPeriod                    = modwinmm.NewProc("timeBeginPeriod")
	procConnectNamedPipe                   = modkernel32.NewProc("ConnectNamedPipe")
	procCreateNamedPipeW                   = modkernel32.NewProc("CreateNamedPipeW")
	procCreateFileW                        = modkernel32.NewProc("CreateFileW")
@@ -122,12 +120,6 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
	return
}

func timeBeginPeriod(period uint32) (n int32) {
	r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
	n = int32(r0)
	return
}

func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
	r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
	if r1 == 0 {

37
vendor/github.com/docker/distribution/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,37 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

# never checkin from the bin file (for now)
bin/*

# Test key files
*.pem

# Cover profiles
*.out

# Editor/IDE specific files.
*.sublime-project
*.sublime-workspace
18
vendor/github.com/docker/distribution/.mailmap
generated
vendored
Normal file
@@ -0,0 +1,18 @@
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>
117
vendor/github.com/docker/distribution/BUILDING.md
generated
vendored
Normal file
@@ -0,0 +1,117 @@

# Building the registry source

## Use-case

This is useful if you intend to actively work on the registry.

### Alternatives

Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).

People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.

OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).

### Gotchas

You are expected to know your way around with go & git.

If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.

## Build the development environment

The first prerequisite of properly building distribution targets is to have a Go
development environment set up. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
environment.

If a Go development environment is set up, one can use `go get` to install the
`registry` command from the current latest:

    go get github.com/docker/distribution/cmd/registry

The above will install the source repository into the `GOPATH`.

Now create the directory for the registry data (this might require you to set permissions properly):

    mkdir -p /var/lib/registry

... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data in another location.

The `registry` binary can then be run with the following:

    $ $GOPATH/bin/registry --version
    $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown

> __NOTE:__ While you do not need to use `go get` to check out the distribution
> project, for these build instructions to work, the project must be checked
> out in the correct location in the `GOPATH`. This should almost always be
> `$GOPATH/src/github.com/docker/distribution`.

The registry can be run with the default config using the following
incantation:

    $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
    INFO[0000] endpoint local-5003 disabled, skipping  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
    INFO[0000] endpoint local-8083 disabled, skipping  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
    INFO[0000] listening on :5000  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
    INFO[0000] debug server listening localhost:5001

If it is working, one should see the above log messages.

### Repeatable Builds

For the full development experience, one should `cd` into
`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
commands, such as `go test`, should work per package (please see
[Developing](#developing) if they don't work).

A `Makefile` has been provided as a convenience to support repeatable builds.
Please install the following into `GOPATH` for it to work:

    go get github.com/golang/lint/golint

Once these commands are available in the `GOPATH`, run `make` to get a full
build:

    $ make
    + clean
    + fmt
    + vet
    + lint
    + build
    github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
    github.com/sirupsen/logrus
    github.com/docker/libtrust
    ...
    github.com/yvasiyarov/gorelic
    github.com/docker/distribution/registry/handlers
    github.com/docker/distribution/cmd/registry
    + test
    ...
    ok github.com/docker/distribution/digest 7.875s
    ok github.com/docker/distribution/manifest 0.028s
    ok github.com/docker/distribution/notifications 17.322s
    ? github.com/docker/distribution/registry [no test files]
    ok github.com/docker/distribution/registry/api/v2 0.101s
    ? github.com/docker/distribution/registry/auth [no test files]
    ok github.com/docker/distribution/registry/auth/silly 0.011s
    ...
    + /Users/sday/go/src/github.com/docker/distribution/bin/registry
    + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
    + binaries

The above provides a repeatable build using the contents of the vendor
directory. This includes formatting, vetting, linting, building,
testing and generating tagged binaries. We can verify this worked by running
the registry binary generated in the "./bin" directory:

    $ ./bin/registry -version
    ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m

### Optional build tags

Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
the environment variable `DOCKER_BUILDTAGS`.
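For example, to enable the OSS and GCS storage drivers (the same tags this
repository's Dockerfile sets), one might run:

    DOCKER_BUILDTAGS='include_oss include_gcs' make binaries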
108
vendor/github.com/docker/distribution/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,108 @@
# Changelog

## 2.6.0 (2017-01-18)

#### Storage
- S3: fixed bug in delete due to read-after-write inconsistency
- S3: allow EC2 IAM roles to be used when authorizing region endpoints
- S3: add Object ACL Support
- S3: fix delete method's notion of subpaths
- S3: use multipart upload API in `Move` method for performance
- S3: add v2 signature signing for legacy S3 clones
- Swift: add simple heuristic to detect incomplete DLOs during read ops
- Swift: support different user and tenant domains
- Swift: bulk deletes in chunks
- Aliyun OSS: fix delete method's notion of subpaths
- Aliyun OSS: optimize data copy after upload finishes
- Azure: close leaking response body
- Fix storage drivers dropping non-EOF errors when listing repositories
- Compare path properly when listing repositories in catalog
- Add a foreign layer URL host whitelist
- Improve catalog enumerate runtime

#### Registry
- Export `storage.CreateOptions` in top-level package
- Enable notifications to endpoints that use self-signed certificates
- Properly validate multi-URL foreign layers
- Add control over validation of URLs in pushed manifests
- Proxy mode: fix socket leak when pull is cancelled
- Tag service: properly handle error responses on HEAD request
- Support for custom authentication URL in proxying registry
- Add configuration option to disable access logging
- Add notification filtering by target media type
- Manifest: `References()` returns all children
- Honor `X-Forwarded-Port` and Forwarded headers
- Reference: Preserve tag and digest in With* functions
- Add policy configuration for enforcing repository classes

#### Client
- Changes the client Tags `All()` method to follow links
- Allow registry clients to connect via HTTP2
- Better handling of OAuth errors in client

#### Spec
- Manifest: clarify relationship between urls and foreign layers
- Authorization: add support for repository classes

#### Manifest
- Override media type returned from `Stat()` for existing manifests
- Add plugin mediatype to distribution manifest

#### Docs
- Document `TOOMANYREQUESTS` error code
- Document required Let's Encrypt port
- Improve documentation around implementation of OAuth2
- Improve documentation for configuration

#### Auth
- Add support for registry type in scope
- Add support for using v2 ping challenges for v1
- Add leeway to JWT `nbf` and `exp` checking
- htpasswd: dynamically parse htpasswd file
- Fix missing auth headers with PATCH HTTP request when pushing to default port

#### Dockerfile
- Update to go1.7
- Reorder Dockerfile steps for better layer caching

#### Notes

Documentation has moved to the documentation repository at
`github.com/docker/docker.github.io/tree/master/registry`

The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet`ing.

## 2.5.0 (2016-06-14)

#### Storage
- Ensure uploads directory is cleaned after upload is committed
- Add ability to cap concurrent operations in filesystem driver
- S3: Add 'us-gov-west-1' to the valid region list
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
- Add redirect middleware

#### Registry
- Add support for blobAccessController middleware
- Add support for layers from foreign sources
- Remove signature store
- Add support for Let's Encrypt
- Correct yaml key names in configuration

#### Client
- Add option to get content digest from manifest get

#### Spec
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
- Clarify API documentation around catalog fetch behavior

#### API
- Support returning HTTP 429 (Too Many Requests)

#### Documentation
- Update auth documentation examples to show "expires in" as int

#### Docker Image
- Use Alpine Linux as base image
148
vendor/github.com/docker/distribution/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,148 @@
# Contributing to the registry

## Before reporting an issue...

### If your problem is with...

- automated builds
- your account on the [Docker Hub](https://hub.docker.com/)
- any other [Docker Hub](https://hub.docker.com/) issue

Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)

### If you...

- need help setting up your registry
- can't figure out something
- are not sure what's going on or what your problem is

Then please do not open an issue here yet - you should first try one of the following support forums:

- irc: #docker-distribution on freenode
- mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution

### Reporting security issues

The Docker maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!

Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).

## Reporting an issue properly

By following these simple rules you will get better and faster feedback on your issue.

- search the bugtracker for an already reported issue

### If you found an issue that describes your problem:

- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.

### If you have not found an existing issue that describes your problem:

1. create a new issue, with a succinct title that describes your issue:
   - bad title: "It doesn't work with my docker"
   - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of:
   - `docker version`
   - `docker info`
   - `docker exec <registry-container> registry -version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry

## Contributing a patch for a known bug, or a small correction

You should follow the basic GitHub workflow:

1. fork
2. commit a change
3. make sure the tests pass
4. PR

Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:

- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"`

Some simple rules to ensure quick merge:

- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits

## Contributing new features

You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.

If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.

Then you should submit your implementation, clearly linking to the issue (and possible proposal).

Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.

It's mandatory to:

- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code

Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.

Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)

## Coding Style

Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.

It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.

The rules:

1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
   [`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
   expectations, caveats and anything else that may be important. If a type
   gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
   In practice, short methods will have short variable names and globals will
   have longer names.
7. No underscores in package names. If you need a compound name, step back,
   and re-examine why you need a compound name. If you still think you need a
   compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
   warrant its own package, it has not been written generally enough to be a
   part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
   required. No, we don't need another unit testing framework. Assertion
   packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
    guidelines. Since you've read all the rules, you now know that.

If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](http://golang.org/doc/effective_go.html). The
[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.
21
vendor/github.com/docker/distribution/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,21 @@
FROM golang:1.8-alpine

ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV DOCKER_BUILDTAGS include_oss include_gcs

ARG GOOS=linux
ARG GOARCH=amd64

RUN set -ex \
    && apk add --no-cache make git

WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml

RUN make PREFIX=/go clean binaries

VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]
CMD ["serve", "/etc/docker/registry/config.yml"]
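A sketch of building and running the image above (the `registry-dev` tag is
arbitrary):

    docker build -t registry-dev .
    docker run -p 5000:5000 registry-dev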
58
vendor/github.com/docker/distribution/MAINTAINERS
generated
vendored
Normal file
@@ -0,0 +1,58 @@
# Distribution maintainers file
#
# This file describes who runs the docker/distribution project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
	[Org."Core maintainers"]
		people = [
			"aaronlehmann",
			"dmcgowan",
			"dmp42",
			"richardscothern",
			"shykes",
			"stevvooe",
		]

[people]

# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.

	# ADD YOURSELF HERE IN ALPHABETICAL ORDER

	[people.aaronlehmann]
	Name = "Aaron Lehmann"
	Email = "aaron.lehmann@docker.com"
	GitHub = "aaronlehmann"

	[people.dmcgowan]
	Name = "Derek McGowan"
	Email = "derek@mcgstyle.net"
	GitHub = "dmcgowan"

	[people.dmp42]
	Name = "Olivier Gambier"
	Email = "olivier@docker.com"
	GitHub = "dmp42"

	[people.richardscothern]
	Name = "Richard Scothern"
	Email = "richard.scothern@gmail.com"
	GitHub = "richardscothern"

	[people.shykes]
	Name = "Solomon Hykes"
	Email = "solomon@docker.com"
	GitHub = "shykes"

	[people.stevvooe]
	Name = "Stephen Day"
	Email = "stephen.day@docker.com"
	GitHub = "stevvooe"
99
vendor/github.com/docker/distribution/Makefile
generated
vendored
Normal file
@@ -0,0 +1,99 @@
# Set an output prefix, which is the local directory if not specified
PREFIX?=$(shell pwd)


# Used to populate version variable in main package.
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)

# Allow turning off function inlining and variable registerization
ifeq (${DISABLE_OPTIMIZATION},true)
	GO_GCFLAGS=-gcflags "-N -l"
	VERSION:="$(VERSION)-noopt"
endif

GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"

.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet
.DEFAULT: all
all: fmt vet lint build test binaries

AUTHORS: .mailmap .git/HEAD
	git log --format='%aN <%aE>' | sort -fu > $@

# This only needs to be generated by hand when cutting full releases.
version/version.go:
	./version/version.sh > $@

# Required for go 1.5 to build
GO15VENDOREXPERIMENT := 1

# Go files
GOFILES=$(shell find . -type f -name '*.go')

# Package list
PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)

# Resolving binary dependencies for specific targets
GOLINT=$(shell which golint || echo '')
VNDR=$(shell which vndr || echo '')

${PREFIX}/bin/registry: $(GOFILES)
	@echo "+ $@"
	@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry

${PREFIX}/bin/digest: $(GOFILES)
	@echo "+ $@"
	@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest

${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES)
	@echo "+ $@"
	@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template

docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
	./bin/registry-api-descriptor-template $< > $@

vet:
	@echo "+ $@"
	@go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS)

fmt:
	@echo "+ $@"
	@test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \
		(echo >&2 "+ please format Go code with 'gofmt -s'" && false)

lint:
	@echo "+ $@"
	$(if $(GOLINT), , \
		$(error Please install golint: `go get -u github.com/golang/lint/golint`))
	@test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)"

build:
	@echo "+ $@"
	@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)

test:
	@echo "+ $@"
	@go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS)

test-full:
	@echo "+ $@"
	@go test -tags "${DOCKER_BUILDTAGS}" $(PKGS)

binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template
	@echo "+ $@"

clean:
	@echo "+ $@"
	@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template"

dep-validate:
	@echo "+ $@"
	$(if $(VNDR), , \
		$(error Please install vndr: go get github.com/lk4d4/vndr))
	@rm -Rf .vendor.bak
	@mv vendor .vendor.bak
	@$(VNDR)
	@test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \
		(echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false)
	@rm -Rf vendor
	@mv .vendor.bak vendor
130
vendor/github.com/docker/distribution/README.md
generated
vendored
Normal file
@@ -0,0 +1,130 @@
# Distribution

The Docker toolset to pack, ship, store, and deliver content.

This repository's main product is the Docker Registry 2.0 implementation
for storing and distributing Docker images. It supersedes the
[docker/docker-registry](https://github.com/docker/docker-registry)
project with a new API design, focused around security and performance.

<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>

[](https://circleci.com/gh/docker/distribution/tree/master)
[](https://godoc.org/github.com/docker/distribution)

This repository contains the following components:

|**Component**       |Description                                                                                                                                                                                           |
|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |

### How does this integrate with Docker engine?

This project should provide an implementation of a V2 API for use in the [Docker
core project](https://github.com/docker/docker). The API should be embeddable
and simplify the process of securely pulling and pushing content from `docker`
daemons.

### What are the long term goals of the Distribution project?

The _Distribution_ project has the further long term goal of providing a
secure tool chain for distributing content. The specifications, APIs and tools
should be as useful with Docker as they are without.

Our goal is to design a professional grade and extensible content distribution
system that allows users to:

* Enjoy an efficient, secured and reliable way to store, manage, package and
  exchange content
* Hack/roll their own on top of healthy open-source components
* Implement their own home-made solutions through good specs and a solid
  extension mechanism.

## More about Registry 2.0

The new registry implementation provides the following benefits:

- faster push and pull
- new, more efficient implementation
- simplified deployment
- pluggable storage backend
- webhook notifications

For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).

### Who needs to deploy a registry?

By default, Docker users pull images from Docker's public registry instance.
[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
ability. Users can also push images to a repository on Docker's public registry,
if they have a [Docker Hub](https://hub.docker.com/) account.

For some users and even companies, this default behavior is sufficient. For
others, it is not.

For example, users with their own software products may want to maintain a
registry for private, company images. Also, you may wish to deploy your own
image repository for images used in testing or continuous integration. For these
use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
may be the better choice.

### Migration to Registry 2.0

For those who have previously deployed their own registry based on the Registry
1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
data migration is required. A tool to assist with migration efforts has been
created. For more information see [docker/migrator](https://github.com/docker/migrator).

## Contribute

Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project. If you are contributing code, see
the instructions for [building a development environment](BUILDING.md).

## Support

If any issues are encountered while using the _Distribution_ project, several
avenues are available for support:

<table>
  <tr>
    <th align="left">
      IRC
    </th>
    <td>
      #docker-distribution on FreeNode
    </td>
  </tr>
  <tr>
    <th align="left">
      Issue Tracker
    </th>
    <td>
      github.com/docker/distribution/issues
    </td>
  </tr>
  <tr>
    <th align="left">
      Google Groups
    </th>
    <td>
      https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
    </td>
  </tr>
  <tr>
    <th align="left">
      Mailing List
    </th>
    <td>
      docker@dockerproject.org
    </td>
  </tr>
</table>

## License

This project is distributed under [Apache License, Version 2.0](LICENSE).
44
vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md
generated
vendored
Normal file
@@ -0,0 +1,44 @@
## Registry Release Checklist

10. Compile release notes detailing features and bugfixes since the last release.

Update the `CHANGELOG.md` file and create a PR to master with the updates.
Once that PR has been approved by maintainers the change may be cherry-picked
to the release branch (new release branches may be forked from this commit).

20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go`

30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files.

```
make AUTHORS
```

40. Create a signed tag.

Distribution uses semantic versioning. Tags are of the format
`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added
to your GitHub account. The comment for the tag should include the release
notes; use previous tags as a guide for formatting them consistently. Run
`git tag -s vx.y.z[-rcn]` to create the tag and `git tag -v vx.y.z[-rcn]` to
verify it, checking the comment and the commit hash.

50. Push the signed tag.

60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox.

70. Update the registry binary in the [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request.

80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to the new patch release if necessary.
e.g. to release `2.3.1`

`2.3.1 (new)`

`2.3.0 -> 2.3.0` can be removed

`2 -> 2.3.1`

`2.3 -> 2.3.1`

90. Build a new distribution/registry image on [Docker Hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images.

267
vendor/github.com/docker/distribution/ROADMAP.md
generated
vendored
Normal file
@@ -0,0 +1,267 @@
# Roadmap

The Distribution Project consists of several components, some of which are
still being defined. This document defines the high-level goals of the
project, identifies the current components, and defines the release
relationship to the Docker Platform.

* [Distribution Goals](#distribution-goals)
* [Distribution Components](#distribution-components)
* [Project Planning](#project-planning): release relationship to the Docker Platform.

This road map is a living document, providing an overview of the goals and
considerations made in respect of the future of the project.

## Distribution Goals

- Replace the existing [docker registry](github.com/docker/docker-registry)
  implementation as the primary implementation.
- Replace the existing push and pull code in the docker engine with the
  distribution package.
- Define a strong data model for distributing docker images
- Provide a flexible distribution tool kit for use in the docker platform
- Unlock new distribution models

## Distribution Components

Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
features and bugfixes for a component will be added to the relevant milestone. If a feature or
bugfix is not part of a milestone, it is currently unscheduled for
implementation.

* [Registry](#registry)
* [Distribution Package](#distribution-package)

***

### Registry

The new Docker registry is the main portion of the distribution repository.
Registry 2.0 is the first release of the next-generation registry. This was
primarily focused on implementing the [new registry
API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
with a focus on security and performance.

Following from the Distribution project goals above, we have a set of goals
for registry v2 that we would like to follow in the design. New features
should be compared against these goals.

#### Data Storage and Distribution First

The registry's first goal is to provide a reliable, consistent storage
location for Docker images. The registry should only provide the minimal
amount of indexing required to fetch image data and no more.

This means we should be selective in new features and API additions, including
those that may require expensive, ever-growing indexes. Requests should be
servable in "constant time".

#### Content Addressability

All data objects used in the registry API should be content addressable.
Content identifiers should be secure and verifiable. This provides a secure,
reliable base from which to build more advanced content distribution systems.

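To make the verifiability claim concrete, here is a minimal Go sketch using
the `github.com/opencontainers/go-digest` package (the same package vendored
elsewhere in this repository); the blob bytes are an arbitrary example:

```
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	blob := []byte("example layer data") // arbitrary example content

	// The content address is just the canonical (sha256) digest of the bytes.
	dgst := digest.FromBytes(blob)
	fmt.Println(dgst)

	// Anyone holding the bytes can recompute the digest and verify that it
	// matches the advertised identifier.
	fmt.Println("verified:", digest.FromBytes(blob) == dgst)
}
```
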
#### Content Agnostic

In the past, changes to the image format would require large changes in Docker
and the Registry. By decoupling the distribution and image format, we can
allow the formats to progress without having to coordinate between the two.
This means that we should be focused on decoupling Docker from the registry
just as much as decoupling the registry from Docker. Such an approach will
allow us to unlock new distribution models that haven't been possible before.

We can take this further by saying that the new registry should be content
agnostic. The registry provides a model of names, tags, manifests and content
addresses, and that model can be used to work with content.

#### Simplicity

The new registry should be closer to a microservice component than its
predecessor. This means it should have a narrower API and a low number of
service dependencies. It should be easy to deploy.

This means that other solutions should be explored before changing the API or
adding extra dependencies. If functionality is required, can it be added as an
extension or companion service?

#### Extensibility

The registry should provide extension points to add functionality, keeping
the core scope narrow while still providing the ability to add features.

Features like search, indexing, synchronization and registry explorers fall
into this category. No such feature should be added unless we've found it
impossible to do through an extension.

#### Active Feature Discussions

The following are feature discussions that are currently active.

If you don't see your favorite, unimplemented feature, feel free to contact us
via IRC or the mailing list and we can talk about adding it. The goal here is
to make sure that new features go through a rigorous design process before
landing in the registry.

##### Proxying to other Registries

A _pull-through caching_ mode exists for the registry, but is restricted from
within the docker client to only mirror the official Docker Hub. This functionality
can be expanded when image provenance has been specified and implemented in the
distribution project.

##### Metadata storage

Metadata for the registry is currently stored with the manifest and layer data on
the storage backend. While this is a big win for simplicity and reliably maintaining
state, it comes at the cost of consistency and high latency. The mutable registry
metadata operations should be abstracted behind an API which will allow ACID-compliant
storage systems to handle metadata.

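As a rough illustration of that abstraction, here is a minimal Go sketch; the
`TagStore` name and its methods are hypothetical, not part of the distribution
API, and simply show the shape of a metadata interface an ACID-backed store
could implement:

```
// Hypothetical sketch only; not part of the distribution API.
package metadata

import "context"

// TagStore covers one slice of mutable registry metadata: the mapping
// from tags to manifest digests. Backing it with an ACID store (for
// example an SQL database) would give consistent, low-latency tag
// operations while blob and manifest data remain on the storage backend.
type TagStore interface {
	// Get resolves a tag in a repository to a manifest digest.
	Get(ctx context.Context, repo, tag string) (digest string, err error)
	// Set points a tag at a manifest digest, atomically.
	Set(ctx context.Context, repo, tag, digest string) error
	// Delete removes the tag; the underlying manifest is untouched.
	Delete(ctx context.Context, repo, tag string) error
}
```
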
##### Peer to Peer transfer

Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit

##### Indexing, Search and Discovery

The original registry provided some implementation of search for use with
private registries. Support has been elided from V2 since we'd like to
decouple search functionality from the registry. This makes the registry
simpler to deploy, especially in use cases where search is not needed, and
lets us decouple the image format from the registry.

There are explorations into using the catalog API and notification system to
build external indexes. The current line of thought is that we will define a
common search API to index and query docker images. Such a system could be run
as a companion to a registry or set of registries to power discovery.

The main issue with search and discovery is that there are so many ways to
accomplish it. There are two aspects to this project. The first is deciding on
how it will be done, including an API definition that can work with changing
data formats. The second is the process of integrating with `docker search`.
We expect that someone will attempt to address the problem with the existing
tools and propose it as a standard search API, or use the experience to inform
a standardization process. Once this has been explored, we will integrate with
the docker client.

Please see the following for more detail:

- https://github.com/docker/distribution/issues/206

##### Deletes

> __NOTE:__ Deletes are a much asked for feature. Before requesting this
feature or participating in discussion, we ask that you read this section in
full and understand the problems behind deletes.

While, at first glance, implementing deletes seems simple, there are a number
of mitigating factors that make many solutions not ideal or even pathological
in the context of a registry. The following paragraphs discuss the background
and approaches that could be applied to arrive at a solution.

The goal of deletes in any system is to remove unused or unneeded data. Only
data requested for deletion should be removed and no other data. Removing
unintended data is worse than _not_ removing data that was requested for
removal, but ideally, both are supported. Generally, according to this rule, we
err on holding data longer than needed, ensuring that it is only removed when
we can be certain that it can be removed. With the current behavior, we opt to
hold onto the data forever, ensuring that data cannot be incorrectly removed.

To understand the problems with implementing deletes, one must understand the
data model. All registry data is stored in a filesystem layout, implemented on
a "storage driver", effectively a _virtual file system_ (VFS). The storage
system must assume that this VFS layer will be eventually consistent and has
poor read-after-write consistency, since this is the lowest common denominator
among the storage drivers. This is mitigated by writing values in
reverse-dependent order, but makes wider transactional operations unsafe.

Layered on the VFS model is a content-addressable _directed, acyclic graph_
(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
Since the same data can be referenced by multiple manifests, we only store
data once, even if it is in different repositories. Thus, we have a set of
blobs, referenced by tags and manifests. If we want to delete a blob we need
to be certain that it is no longer referenced by another manifest or tag. When
we delete a manifest, we also can try to delete the referenced blobs. Deciding
whether or not a blob has an active reference is the crux of the problem.

Conceptually, deleting a manifest and its resources is quite simple. Just find
all the manifests, enumerate the referenced blobs and delete the blobs not in
that set. An astute observer will recognize this as a garbage collection
problem. As with garbage collection in programming languages, this is very
simple when one always has a consistent view. When one adds parallelism and an
inconsistent view of data, it becomes very challenging.

A simple example can demonstrate this. Let's say we are deleting a manifest
_A_ in one process. We scan the manifest and decide that all the blobs are
ready for deletion. Concurrently, we have another process accepting a new
manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
is accepted and all the blobs are considered present, so the operation
proceeds. The original process then deletes the referenced blobs, assuming
they were unreferenced. The manifest _B_, which we thought had all of its data
present, can no longer be served by the registry, since the dependent data has
been deleted.

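The race above is a classic check-then-act problem. The following Go sketch
(illustrative only; the `Store` type and its fields are hypothetical, not the
registry's data model) shows a naive mark-and-sweep whose window between
marking and sweeping makes it unsafe without coordination:

```
package main

// Store is a toy stand-in for the registry's blob and manifest layout.
type Store struct {
	manifests map[string][]string // manifest ID -> referenced blob digests
	blobs     map[string][]byte   // blob digest -> data
}

// sweep deletes every blob not referenced by any manifest. If a new
// manifest is accepted between the mark phase and the delete loop, its
// blobs can be removed out from under it -- exactly the manifest A /
// manifest B race described above.
func (s *Store) sweep() {
	live := map[string]bool{}
	for _, refs := range s.manifests { // mark: collect referenced digests
		for _, d := range refs {
			live[d] = true
		}
	}
	for d := range s.blobs { // sweep: unsafe without a consistent view
		if !live[d] {
			delete(s.blobs, d)
		}
	}
}

func main() {
	s := &Store{
		manifests: map[string][]string{"A": {"sha256:aaa"}},
		blobs:     map[string][]byte{"sha256:aaa": {}, "sha256:bbb": {}},
	}
	s.sweep() // sha256:bbb is collected; a concurrent writer could lose data
}
```
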
Deleting data from the registry safely requires some way to coordinate this
operation. The following approaches are being considered:

- _Reference Counting_ - Maintain a count of references to each blob. This is
  challenging for a number of reasons: 1. maintaining a consistent consensus
  of reference counts across a set of Registries and 2. building the initial
  list of reference counts for an existing registry. These challenges can be
  met with a consensus protocol like Paxos or Raft in the first case and a
  necessary but simple scan in the second.
- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
  and find all blob references. Delete all unreferenced blobs. This approach
  is very simple but requires disabling writes for a period of time while the
  service reads all data. This is slow and expensive but very accurate and
  effective.
- _Generational GC_ - Do something similar to above but instead of blocking
  writes, writes are sent to another storage backend while reads are broadcast
  to the new and old backends. GC is then performed on the read-only portion.
  Because writes land in the new backend, the data in the read-only section
  can be safely deleted. The main drawbacks of this approach are complexity
  and coordination.
- _Centralized Oracle_ - Using a centralized, transactional database, we can
  know exactly which data is referenced at any given time. This avoids the
  coordination problem by managing this data in a single location. We trade
  off metadata scalability for simplicity and performance. This is a very good
  option for most registry deployments. This would create a bottleneck for
  registry metadata. However, metadata is generally not the main bottleneck
  when serving images.

Please let us know if other solutions exist that we have yet to enumerate.
Note that for any approach, implementation is a massive consideration. For
example, a mark-sweep based solution may seem simple but the amount of
coordination work may offset the extra work it would take to build a
_Centralized Oracle_. We'll accept proposals for any solution but please
coordinate with us before dropping code.

At this time, we have traded off simplicity and ease of deployment for disk
space. Simplicity and ease of deployment tend to reduce developer involvement,
which is currently the most expensive resource in software engineering. Taking
on any solution for deletes will greatly affect these factors, trading off
very cheap disk space for a complex deployment and operational story.

Please see the following issues for more detail:

- https://github.com/docker/distribution/issues/422
- https://github.com/docker/distribution/issues/461
- https://github.com/docker/distribution/issues/462

### Distribution Package

At its core, the Distribution Project is a set of Go packages that make up
Distribution Components. At this time, most of these packages make up the
Registry implementation.

The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.

For feature additions, please see the Registry section. In the future, we may break out a
separate Roadmap for distribution-specific features that apply to more than
just the registry.

***

### Project Planning

An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.

257
vendor/github.com/docker/distribution/blobs.go
generated
vendored
Normal file
@@ -0,0 +1,257 @@
package distribution

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

var (
	// ErrBlobExists returned when blob already exists
	ErrBlobExists = errors.New("blob exists")

	// ErrBlobDigestUnsupported when blob digest is an unsupported version.
	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")

	// ErrBlobUnknown when blob is not found.
	ErrBlobUnknown = errors.New("unknown blob")

	// ErrBlobUploadUnknown returned when upload is not found.
	ErrBlobUploadUnknown = errors.New("blob upload unknown")

	// ErrBlobInvalidLength returned when the blob has an expected length on
	// commit, meaning mismatched with the descriptor or an invalid value.
	ErrBlobInvalidLength = errors.New("blob invalid length")
)

// ErrBlobInvalidDigest returned when digest check fails.
type ErrBlobInvalidDigest struct {
	Digest digest.Digest
	Reason error
}

func (err ErrBlobInvalidDigest) Error() string {
	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
		err.Digest, err.Reason)
}

// ErrBlobMounted returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
	From       reference.Canonical
	Descriptor Descriptor
}

func (err ErrBlobMounted) Error() string {
	return fmt.Sprintf("blob mounted from: %v to: %v",
		err.From, err.Descriptor)
}

// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
	// MediaType describes the type of the content. All text based formats are
	// encoded as utf-8.
	MediaType string `json:"mediaType,omitempty"`

	// Size in bytes of content.
	Size int64 `json:"size,omitempty"`

	// Digest uniquely identifies the content. A byte stream can be verified
	// against this digest.
	Digest digest.Digest `json:"digest,omitempty"`

	// URLs contains the source URLs of this content.
	URLs []string `json:"urls,omitempty"`

	// NOTE: Before adding a field here, please ensure that all
	// other options have been exhausted. Much of the type relationships
	// depend on the simplicity of this type.
}

// Descriptor returns the descriptor, to make it satisfy the Describable
// interface. Note that implementations of Describable are generally objects
// which can be described, not simply descriptors; this exception is in place
// to make it more convenient to pass actual descriptors to functions that
// expect Describable objects.
func (d Descriptor) Descriptor() Descriptor {
	return d
}

// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
	// Stat provides metadata about a blob identified by the digest. If the
	// blob is unknown to the describer, ErrBlobUnknown will be returned.
	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}

// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
	Delete(ctx context.Context, dgst digest.Digest) error
}

// BlobEnumerator enables iterating over blobs from storage
type BlobEnumerator interface {
	Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}

// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
	BlobStatter

	// SetDescriptor assigns the descriptor to the digest. The provided digest and
	// the digest in the descriptor must map to identical content but they may
	// differ on their algorithm. The descriptor must have the canonical
	// digest of the content and the digest algorithm must match the
	// annotator's canonical algorithm.
	//
	// Such a facility can be used to map blobs between digest domains, with
	// the restriction that the algorithm of the descriptor must match the
	// canonical algorithm (ie sha256) of the annotator.
	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error

	// Clear enables descriptors to be unlinked
	Clear(ctx context.Context, dgst digest.Digest) error
}

// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
	BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}

// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
	io.ReadSeeker
	io.Closer
}

// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
	// Get returns the entire blob identified by digest along with the descriptor.
	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)

	// Open provides a ReadSeekCloser to the blob identified by the provided
	// descriptor. If the blob is not known to the service, an error will be
	// returned.
	Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
}

// BlobServer can serve blobs via http.
type BlobServer interface {
	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
	// service may decide to redirect the client elsewhere or serve the data
	// directly.
	//
	// This handler only issues successful responses, such as 2xx or 3xx,
	// meaning it serves data or issues a redirect. If the blob is not
	// available, an error will be returned and the caller may still issue a
	// response.
	//
	// The implementation may serve the same blob from a different digest
	// domain. The appropriate headers will be set for the blob, unless they
	// have already been set by the caller.
	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}

// BlobIngester ingests blob data.
type BlobIngester interface {
	// Put inserts the content p into the blob service, returning a descriptor
	// or an error.
	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)

	// Create allocates a new blob writer to add a blob to this service. The
	// returned handle can be written to and later resumed using an opaque
	// identifier. With this approach, one can Close and Resume a BlobWriter
	// multiple times until the BlobWriter is committed or cancelled.
	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)

	// Resume attempts to resume a write to a blob, identified by an id.
	Resume(ctx context.Context, id string) (BlobWriter, error)
}

// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
	Apply(interface{}) error
}

// CreateOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type CreateOptions struct {
	Mount struct {
		ShouldMount bool
		From        reference.Canonical
		// Stat allows passing a precalculated descriptor to link and return.
		// Blob access check will be skipped if set.
		Stat *Descriptor
	}
}

// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
	io.WriteCloser
	io.ReaderFrom

	// Size returns the number of bytes written to this blob.
	Size() int64

	// ID returns the identifier for this writer. The ID can be used with the
	// Blob service to later resume the write.
	ID() string

	// StartedAt returns the time this blob write was started.
	StartedAt() time.Time

	// Commit completes the blob writer process. The content is verified
	// against the provided provisional descriptor, which may result in an
	// error. Depending on the implementation, written data may be validated
	// against the provisional descriptor fields. If MediaType is not present,
	// the implementation may reject the commit or assign
	// "application/octet-stream" to the blob. The returned descriptor may
	// have a different digest depending on the blob store, referred to as
	// the canonical descriptor.
	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)

	// Cancel ends the blob write without storing any data and frees any
	// associated resources. Any data written thus far will be lost. Cancel
	// implementations should allow multiple calls even after a commit that
	// result in a no-op. This allows use of Cancel in a defer statement,
	// increasing the assurance that it is correctly called.
	Cancel(ctx context.Context) error
}

// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
	BlobStatter
	BlobProvider
	BlobIngester
}

// BlobStore represents the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
	BlobService
	BlobServer
	BlobDeleter
}

94
vendor/github.com/docker/distribution/circle.yml
generated
vendored
Normal file
@@ -0,0 +1,94 @@
# Pony-up!
machine:
  pre:
  # Install gvm
    - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
  # Install codecov for coverage
    - pip install --user codecov

  post:
  # go
    - gvm install go1.8 --prefer-binary --name=stable

  environment:
    # Convenient shortcuts to "common" locations
    CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
    BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
    # Trick circle brainflat "no absolute path" behavior
    BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
    DOCKER_BUILDTAGS: "include_oss include_gcs"
    # Workaround Circle parsing dumb bugs and/or YAML wonkyness
    CIRCLE_PAIN: "mode: set"

  hosts:
    # Not used yet
    fancy: 127.0.0.1

dependencies:
  pre:
  # Copy the code to the gopath of all go versions
    - >
      gvm use stable &&
      mkdir -p "$(dirname $BASE_STABLE)" &&
      cp -R "$CHECKOUT" "$BASE_STABLE"

  override:
  # Install dependencies for every copied clone/go version
    - gvm use stable && go get github.com/lk4d4/vndr:
        pwd: $BASE_STABLE

  post:
  # For the stable go version, additionally install linting tools
    - >
      gvm use stable &&
      go get github.com/axw/gocov/gocov github.com/golang/lint/golint

test:
  pre:
  # Output the go versions we are going to test
    # - gvm use old && go version
    - gvm use stable && go version

  # Ensure validation of dependencies
    - git fetch origin:
        pwd: $BASE_STABLE
    - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi:
        pwd: $BASE_STABLE

  # First thing: build everything. This will catch compile errors, and it's
  # also necessary for go vet to work properly (see #807).
    - gvm use stable && go install $(go list ./... | grep -v "/vendor/"):
        pwd: $BASE_STABLE

  # FMT
    - gvm use stable && make fmt:
        pwd: $BASE_STABLE

  # VET
    - gvm use stable && make vet:
        pwd: $BASE_STABLE

  # LINT
    - gvm use stable && make lint:
        pwd: $BASE_STABLE

  override:
  # Test stable, and report
    - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
        timeout: 1000
        pwd: $BASE_STABLE

  # Test stable with race
    - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE':
        timeout: 1000
        pwd: $BASE_STABLE
  post:
  # Report to codecov
    - bash <(curl -s https://codecov.io/bash):
        pwd: $BASE_STABLE

## Notes
# Do we want these as well?
# - go get code.google.com/p/go.tools/cmd/goimports
# - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
# http://labix.org/gocheck

97
vendor/github.com/docker/distribution/cmd/digest/main.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/docker/distribution/version"
	"github.com/opencontainers/go-digest"
)

var (
	algorithm   = digest.Canonical
	showVersion bool
)

type job struct {
	name   string
	reader io.Reader
}

func init() {
	flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)")
	flag.Var(&algorithm, "algorithm", "select the digest algorithm")
	flag.BoolVar(&showVersion, "version", false, "show the version and exit")

	log.SetFlags(0)
	log.SetPrefix(os.Args[0] + ": ")
}

func usage() {
	fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0])
	fmt.Fprint(os.Stderr, `
Calculate the digest of one or more input files, emitting the result
to standard out. If no files are provided, the digest of stdin will
be calculated.

`)
	flag.PrintDefaults()
}

func unsupported() {
	log.Fatalf("unsupported digest algorithm: %v", algorithm)
}

func main() {
	var jobs []job

	flag.Usage = usage
	flag.Parse()
	if showVersion {
		version.PrintVersion()
		return
	}

	var fail bool // if we fail on one item, foul the exit code
	if flag.NArg() > 0 {
		for _, path := range flag.Args() {
			fp, err := os.Open(path)

			if err != nil {
				log.Printf("%s: %v", path, err)
				fail = true
				continue
			}
			defer fp.Close()

			jobs = append(jobs, job{name: path, reader: fp})
		}
	} else {
		// just read stdin
		jobs = append(jobs, job{name: "-", reader: os.Stdin})
	}

	digestFn := algorithm.FromReader

	if !algorithm.Available() {
		unsupported()
	}

	for _, job := range jobs {
		dgst, err := digestFn(job.reader)
		if err != nil {
			log.Printf("%s: %v", job.name, err)
			fail = true
			continue
		}

		fmt.Printf("%v\t%s\n", dgst, job.name)
	}

	if fail {
		os.Exit(1)
	}
}

131
vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
// registry-api-descriptor-template uses the APIDescriptor defined in the
// api/v2 package to execute templates passed to the command line.
//
// For example, to generate a new API specification, one would execute the
// following command from the repo root:
//
// 	$ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md
//
// The templates are passed in the api/v2.APIDescriptor object. Please see the
// package documentation for fields available on that object. The template
// syntax is from Go's standard library text/template package. For information
// on Go's template syntax, please see golang.org/pkg/text/template.
package main

import (
	"log"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"text/template"

	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/api/v2"
)

var spaceRegex = regexp.MustCompile(`\n\s*`)

func main() {

	if len(os.Args) != 2 {
		log.Fatalln("please specify a template to execute.")
	}

	path := os.Args[1]
	filename := filepath.Base(path)

	funcMap := template.FuncMap{
		"removenewlines": func(s string) string {
			return spaceRegex.ReplaceAllString(s, " ")
		},
		"statustext":    http.StatusText,
		"prettygorilla": prettyGorillaMuxPath,
	}

	tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path))

	data := struct {
		RouteDescriptors []v2.RouteDescriptor
		ErrorDescriptors []errcode.ErrorDescriptor
	}{
		RouteDescriptors: v2.APIDescriptor.RouteDescriptors,
		ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"),
			// The following are part of the specification but provided by errcode default.
			errcode.ErrorCodeUnauthorized.Descriptor(),
			errcode.ErrorCodeDenied.Descriptor(),
			errcode.ErrorCodeUnsupported.Descriptor()),
	}

	if err := tmpl.Execute(os.Stdout, data); err != nil {
		log.Fatalln(err)
	}
}

// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux
// route string, making it suitable for documentation.
func prettyGorillaMuxPath(s string) string {
	// Stateful parser that removes regular expressions from gorilla
	// routes. It correctly handles balanced bracket pairs.

	var output string
	var label string
	var level int

start:
	if s[0] == '{' {
		s = s[1:]
		level++
		goto capture
	}

	output += string(s[0])
	s = s[1:]

	goto end
capture:
	switch s[0] {
	case '{':
		level++
	case '}':
		level--

		if level == 0 {
			s = s[1:]
			goto label
		}
	case ':':
		s = s[1:]
		goto skip
	default:
		label += string(s[0])
	}
	s = s[1:]
	goto capture
skip:
	switch s[0] {
	case '{':
		level++
	case '}':
		level--
	}
	s = s[1:]

	if level == 0 {
		goto label
	}

	goto skip
label:
	if label != "" {
		output += "<" + label + ">"
		label = ""
	}
end:
	if s != "" {
		goto start
	}

	return output

}

55
vendor/github.com/docker/distribution/cmd/registry/config-cache.yml
generated
vendored
Normal file
@@ -0,0 +1,55 @@
version: 0.1
log:
  level: debug
  fields:
    service: registry
    environment: development
storage:
  cache:
    blobdescriptor: redis
  filesystem:
    rootdirectory: /var/lib/registry-cache
  maintenance:
    uploadpurging:
      enabled: false
http:
  addr: :5000
  secret: asecretforlocaldevelopment
  debug:
    addr: localhost:5001
  headers:
    X-Content-Type-Options: [nosniff]
redis:
  addr: localhost:6379
  pool:
    maxidle: 16
    maxactive: 64
    idletimeout: 300s
  dialtimeout: 10ms
  readtimeout: 10ms
  writetimeout: 10ms
notifications:
  endpoints:
    - name: local-8082
      url: http://localhost:5003/callback
      headers:
        Authorization: [Bearer <an example token>]
      timeout: 1s
      threshold: 10
      backoff: 1s
      disabled: true
    - name: local-8083
      url: http://localhost:8083/callback
      timeout: 1s
      threshold: 10
      backoff: 1s
      disabled: true
proxy:
  remoteurl: https://registry-1.docker.io
  username: username
  password: password
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3
66
vendor/github.com/docker/distribution/cmd/registry/config-dev.yml
generated
vendored
Normal file
@@ -0,0 +1,66 @@
version: 0.1
log:
  level: debug
  fields:
    service: registry
    environment: development
  hooks:
    - type: mail
      disabled: true
      levels:
        - panic
      options:
        smtp:
          addr: mail.example.com:25
          username: mailuser
          password: password
          insecure: true
        from: sender@example.com
        to:
          - errors@example.com
storage:
  delete:
    enabled: true
  cache:
    blobdescriptor: redis
  filesystem:
    rootdirectory: /var/lib/registry
  maintenance:
    uploadpurging:
      enabled: false
http:
  addr: :5000
  debug:
    addr: localhost:5001
  headers:
    X-Content-Type-Options: [nosniff]
redis:
  addr: localhost:6379
  pool:
    maxidle: 16
    maxactive: 64
    idletimeout: 300s
  dialtimeout: 10ms
  readtimeout: 10ms
  writetimeout: 10ms
notifications:
  endpoints:
    - name: local-5003
      url: http://localhost:5003/callback
      headers:
        Authorization: [Bearer <an example token>]
      timeout: 1s
      threshold: 10
      backoff: 1s
      disabled: true
    - name: local-8083
      url: http://localhost:8083/callback
      timeout: 1s
      threshold: 10
      backoff: 1s
      disabled: true
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3
18
vendor/github.com/docker/distribution/cmd/registry/config-example.yml
generated
vendored
Normal file
@@ -0,0 +1,18 @@
version: 0.1
log:
  fields:
    service: registry
storage:
  cache:
    blobdescriptor: inmemory
  filesystem:
    rootdirectory: /var/lib/registry
http:
  addr: :5000
  headers:
    X-Content-Type-Options: [nosniff]
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3
25
vendor/github.com/docker/distribution/cmd/registry/main.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
package main

import (
	_ "net/http/pprof"

	"github.com/docker/distribution/registry"
	_ "github.com/docker/distribution/registry/auth/htpasswd"
	_ "github.com/docker/distribution/registry/auth/silly"
	_ "github.com/docker/distribution/registry/auth/token"
	_ "github.com/docker/distribution/registry/proxy"
	_ "github.com/docker/distribution/registry/storage/driver/azure"
	_ "github.com/docker/distribution/registry/storage/driver/filesystem"
	_ "github.com/docker/distribution/registry/storage/driver/gcs"
	_ "github.com/docker/distribution/registry/storage/driver/inmemory"
	_ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront"
	_ "github.com/docker/distribution/registry/storage/driver/middleware/redirect"
	_ "github.com/docker/distribution/registry/storage/driver/oss"
	_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
	_ "github.com/docker/distribution/registry/storage/driver/s3-goamz"
	_ "github.com/docker/distribution/registry/storage/driver/swift"
)

func main() {
	registry.RootCmd.Execute()
}

647
vendor/github.com/docker/distribution/configuration/configuration.go
generated
vendored
Normal file
@@ -0,0 +1,647 @@
|
||||
package configuration
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
|
||||
// optionally modified by environment variables.
|
||||
//
|
||||
// Note that yaml field names should never include _ characters, since this is the separator used
|
||||
// in environment variable names.
|
||||
type Configuration struct {
|
||||
// Version is the version which defines the format of the rest of the configuration
|
||||
Version Version `yaml:"version"`
|
||||
|
||||
// Log supports setting various parameters related to the logging
|
||||
// subsystem.
|
||||
Log struct {
|
||||
// AccessLog configures access logging.
|
||||
AccessLog struct {
|
||||
// Disabled disables access logging.
|
||||
Disabled bool `yaml:"disabled,omitempty"`
|
||||
} `yaml:"accesslog,omitempty"`
|
||||
|
||||
// Level is the granularity at which registry operations are logged.
|
||||
Level Loglevel `yaml:"level"`
|
||||
|
||||
// Formatter overrides the default formatter with another. Options
|
||||
// include "text", "json" and "logstash".
|
||||
Formatter string `yaml:"formatter,omitempty"`
|
||||
|
||||
// Fields allows users to specify static string fields to include in
|
||||
// the logger context.
|
||||
Fields map[string]interface{} `yaml:"fields,omitempty"`
|
||||
|
||||
// Hooks allows users to configure the log hooks, to enabling the
|
||||
// sequent handling behavior, when defined levels of log message emit.
|
||||
Hooks []LogHook `yaml:"hooks,omitempty"`
|
||||
}
|
||||
|
||||
// Loglevel is the level at which registry operations are logged. This is
|
||||
// deprecated. Please use Log.Level in the future.
|
||||
Loglevel Loglevel `yaml:"loglevel,omitempty"`
|
||||
|
||||
// Storage is the configuration for the registry's storage driver
|
||||
Storage Storage `yaml:"storage"`
|
||||
|
||||
// Auth allows configuration of various authorization methods that may be
|
||||
// used to gate requests.
|
||||
Auth Auth `yaml:"auth,omitempty"`
|
||||
|
||||
// Middleware lists all middlewares to be used by the registry.
|
||||
Middleware map[string][]Middleware `yaml:"middleware,omitempty"`
|
||||
|
||||
// Reporting is the configuration for error reporting
|
||||
Reporting Reporting `yaml:"reporting,omitempty"`
|
||||
|
||||
// HTTP contains configuration parameters for the registry's http
|
||||
// interface.
|
||||
HTTP struct {
|
||||
// Addr specifies the bind address for the registry instance.
|
||||
Addr string `yaml:"addr,omitempty"`
|
||||
|
||||
// Net specifies the net portion of the bind address. A default empty value means tcp.
|
||||
Net string `yaml:"net,omitempty"`
|
||||
|
||||
// Host specifies an externally-reachable address for the registry, as a fully
|
||||
// qualified URL.
|
||||
Host string `yaml:"host,omitempty"`
|
||||
|
||||
Prefix string `yaml:"prefix,omitempty"`
|
||||
|
||||
// Secret specifies the secret key which HMAC tokens are created with.
|
||||
Secret string `yaml:"secret,omitempty"`
|
||||
|
||||
// RelativeURLs specifies that relative URLs should be returned in
|
||||
// Location headers
|
||||
RelativeURLs bool `yaml:"relativeurls,omitempty"`
|
||||
|
||||
// TLS instructs the http server to listen with a TLS configuration.
|
||||
// This only support simple tls configuration with a cert and key.
|
||||
// Mostly, this is useful for testing situations or simple deployments
|
||||
// that require tls. If more complex configurations are required, use
|
||||
// a proxy or make a proposal to add support here.
|
||||
TLS struct {
|
||||
// Certificate specifies the path to an x509 certificate file to
|
||||
// be used for TLS.
|
||||
Certificate string `yaml:"certificate,omitempty"`
|
||||
|
||||
// Key specifies the path to the x509 key file, which should
|
||||
// contain the private portion for the file specified in
|
||||
// Certificate.
|
||||
Key string `yaml:"key,omitempty"`
|
||||
|
||||
// Specifies the CA certs for client authentication
|
||||
// A file may contain multiple CA certificates encoded as PEM
|
||||
ClientCAs []string `yaml:"clientcas,omitempty"`
|
||||
|
||||
// LetsEncrypt is used to configuration setting up TLS through
|
||||
// Let's Encrypt instead of manually specifying certificate and
|
||||
// key. If a TLS certificate is specified, the Let's Encrypt
|
||||
// section will not be used.
|
||||
LetsEncrypt struct {
|
||||
// CacheFile specifies cache file to use for lets encrypt
|
||||
// certificates and keys.
|
||||
CacheFile string `yaml:"cachefile,omitempty"`
|
||||
|
||||
// Email is the email to use during Let's Encrypt registration
|
||||
Email string `yaml:"email,omitempty"`
|
||||
} `yaml:"letsencrypt,omitempty"`
|
||||
} `yaml:"tls,omitempty"`
|
||||
|
||||
// Headers is a set of headers to include in HTTP responses. A common
|
||||
// use case for this would be security headers such as
|
||||
// Strict-Transport-Security. The map keys are the header names, and
|
||||
// the values are the associated header payloads.
|
||||
Headers http.Header `yaml:"headers,omitempty"`
|
||||
|
||||
// Debug configures the http debug interface, if specified. This can
|
||||
// include services such as pprof, expvar and other data that should
|
||||
// not be exposed externally. Left disabled by default.
|
||||
Debug struct {
|
||||
// Addr specifies the bind address for the debug server.
|
||||
Addr string `yaml:"addr,omitempty"`
|
||||
} `yaml:"debug,omitempty"`
|
||||
|
||||
// HTTP2 configuration options
|
||||
HTTP2 struct {
|
||||
// Specifies whether the registry should disallow clients attempting
|
||||
// to connect via http2. If set to true, only http/1.1 is supported.
|
||||
Disabled bool `yaml:"disabled,omitempty"`
|
||||
} `yaml:"http2,omitempty"`
|
||||
} `yaml:"http,omitempty"`
|
||||
|
||||
// Notifications specifies configuration about various endpoint to which
|
||||
// registry events are dispatched.
|
||||
Notifications Notifications `yaml:"notifications,omitempty"`
|
||||
|
||||
// Redis configures the redis pool available to the registry webapp.
|
||||
Redis struct {
|
||||
// Addr specifies the the redis instance available to the application.
|
||||
Addr string `yaml:"addr,omitempty"`
|
||||
|
||||
// Password string to use when making a connection.
|
||||
Password string `yaml:"password,omitempty"`
|
||||
|
||||
// DB specifies the database to connect to on the redis instance.
|
||||
DB int `yaml:"db,omitempty"`
|
||||
|
||||
DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect
|
||||
ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data
|
||||
WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data
|
||||
|
||||
// Pool configures the behavior of the redis connection pool.
|
||||
Pool struct {
|
||||
// MaxIdle sets the maximum number of idle connections.
|
||||
MaxIdle int `yaml:"maxidle,omitempty"`
|
||||
|
||||
// MaxActive sets the maximum number of connections that should be
|
||||
// opened before blocking a connection request.
|
||||
MaxActive int `yaml:"maxactive,omitempty"`
|
||||
|
||||
// IdleTimeout sets the amount time to wait before closing
|
||||
// inactive connections.
|
||||
IdleTimeout time.Duration `yaml:"idletimeout,omitempty"`
|
||||
} `yaml:"pool,omitempty"`
|
||||
} `yaml:"redis,omitempty"`
|
||||
|
||||
Health Health `yaml:"health,omitempty"`
|
||||
|
||||
Proxy Proxy `yaml:"proxy,omitempty"`
|
||||
|
||||
// Compatibility is used for configurations of working with older or deprecated features.
|
||||
Compatibility struct {
|
||||
// Schema1 configures how schema1 manifests will be handled
|
||||
Schema1 struct {
|
||||
// TrustKey is the signing key to use for adding the signature to
|
||||
// schema1 manifests.
|
||||
TrustKey string `yaml:"signingkeyfile,omitempty"`
|
||||
} `yaml:"schema1,omitempty"`
|
||||
} `yaml:"compatibility,omitempty"`
|
||||
|
||||
// Validation configures validation options for the registry.
|
||||
Validation struct {
|
||||
// Enabled enables the other options in this section. This field is
|
||||
// deprecated in favor of Disabled.
|
||||
Enabled bool `yaml:"enabled,omitempty"`
|
||||
// Disabled disables the other options in this section.
|
||||
Disabled bool `yaml:"disabled,omitempty"`
|
||||
// Manifests configures manifest validation.
|
||||
Manifests struct {
|
||||
// URLs configures validation for URLs in pushed manifests.
|
||||
URLs struct {
|
||||
// Allow specifies regular expressions (https://godoc.org/regexp/syntax)
|
||||
// that URLs in pushed manifests must match.
|
||||
Allow []string `yaml:"allow,omitempty"`
|
||||
// Deny specifies regular expressions (https://godoc.org/regexp/syntax)
|
||||
// that URLs in pushed manifests must not match.
|
||||
Deny []string `yaml:"deny,omitempty"`
|
||||
} `yaml:"urls,omitempty"`
|
||||
} `yaml:"manifests,omitempty"`
|
||||
} `yaml:"validation,omitempty"`
|
||||
|
||||
// Policy configures registry policy options.
|
||||
Policy struct {
|
||||
// Repository configures policies for repositories
|
||||
Repository struct {
|
||||
// Classes is a list of repository classes which the
|
||||
// registry allows content for. This class is matched
|
||||
// against the configuration media type inside uploaded
|
||||
// manifests. When non-empty, the registry will enforce
|
||||
// the class in authorized resources.
|
||||
Classes []string `yaml:"classes"`
|
||||
} `yaml:"repository,omitempty"`
|
||||
} `yaml:"policy,omitempty"`
|
||||
}
|
||||
|
||||
// LogHook is composed of hook Level and Type.
|
||||
// After hooks configuration, it can execute the next handling automatically,
|
||||
// when defined levels of log message emitted.
|
||||
// Example: hook can sending an email notification when error log happens in app.
|
||||
type LogHook struct {
|
||||
// Disable lets user select to enable hook or not.
|
||||
Disabled bool `yaml:"disabled,omitempty"`
|
||||
|
||||
// Type allows user to select which type of hook handler they want.
|
||||
Type string `yaml:"type,omitempty"`
|
||||
|
||||
// Levels set which levels of log message will let hook executed.
|
||||
Levels []string `yaml:"levels,omitempty"`
|
||||
|
||||
// MailOptions allows user to configure email parameters.
|
||||
MailOptions MailOptions `yaml:"options,omitempty"`
|
||||
}
|
||||
|
||||
// MailOptions provides the configuration sections to user, for specific handler.
|
||||
type MailOptions struct {
|
||||
SMTP struct {
|
||||
// Addr defines smtp host address
|
||||
Addr string `yaml:"addr,omitempty"`
|
||||
|
||||
// Username defines user name to smtp host
|
||||
Username string `yaml:"username,omitempty"`
|
||||
|
||||
// Password defines password of login user
|
||||
Password string `yaml:"password,omitempty"`
|
||||
|
||||
// Insecure defines if smtp login skips the secure certification.
|
||||
Insecure bool `yaml:"insecure,omitempty"`
|
||||
} `yaml:"smtp,omitempty"`
|
||||
|
||||
// From defines mail sending address
|
||||
From string `yaml:"from,omitempty"`
|
||||
|
||||
// To defines mail receiving address
|
||||
To []string `yaml:"to,omitempty"`
|
||||
}

// FileChecker is a type of entry in the health section for checking files.
type FileChecker struct {
	// Interval is the duration in between checks
	Interval time.Duration `yaml:"interval,omitempty"`
	// File is the path to check
	File string `yaml:"file,omitempty"`
	// Threshold is the number of times a check must fail to trigger an
	// unhealthy state
	Threshold int `yaml:"threshold,omitempty"`
}

// HTTPChecker is a type of entry in the health section for checking HTTP URIs.
type HTTPChecker struct {
	// Timeout is the duration to wait before timing out the HTTP request
	Timeout time.Duration `yaml:"timeout,omitempty"`
	// StatusCode is the expected status code
	StatusCode int
	// Interval is the duration in between checks
	Interval time.Duration `yaml:"interval,omitempty"`
	// URI is the HTTP URI to check
	URI string `yaml:"uri,omitempty"`
	// Headers lists static headers that should be added to all requests
	Headers http.Header `yaml:"headers"`
	// Threshold is the number of times a check must fail to trigger an
	// unhealthy state
	Threshold int `yaml:"threshold,omitempty"`
}

// TCPChecker is a type of entry in the health section for checking TCP servers.
type TCPChecker struct {
	// Timeout is the duration to wait before timing out the TCP connection
	Timeout time.Duration `yaml:"timeout,omitempty"`
	// Interval is the duration in between checks
	Interval time.Duration `yaml:"interval,omitempty"`
	// Addr is the TCP address to check
	Addr string `yaml:"addr,omitempty"`
	// Threshold is the number of times a check must fail to trigger an
	// unhealthy state
	Threshold int `yaml:"threshold,omitempty"`
}

// Health provides the configuration section for health checks.
type Health struct {
	// FileCheckers is a list of paths to check
	FileCheckers []FileChecker `yaml:"file,omitempty"`
	// HTTPCheckers is a list of URIs to check
	HTTPCheckers []HTTPChecker `yaml:"http,omitempty"`
	// TCPCheckers is a list of addresses to check
	TCPCheckers []TCPChecker `yaml:"tcp,omitempty"`
	// StorageDriver configures a health check on the configured storage
	// driver
	StorageDriver struct {
		// Enabled turns on the health check for the storage driver
		Enabled bool `yaml:"enabled,omitempty"`
		// Interval is the duration in between checks
		Interval time.Duration `yaml:"interval,omitempty"`
		// Threshold is the number of times a check must fail to trigger an
		// unhealthy state
		Threshold int `yaml:"threshold,omitempty"`
	} `yaml:"storagedriver,omitempty"`
}

// v0_1Configuration is a Version 0.1 Configuration struct
// This is currently aliased to Configuration, as it is the current version
type v0_1Configuration Configuration

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent unsigned integers
func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var versionString string
	err := unmarshal(&versionString)
	if err != nil {
		return err
	}

	newVersion := Version(versionString)
	if _, err := newVersion.major(); err != nil {
		return err
	}

	if _, err := newVersion.minor(); err != nil {
		return err
	}

	*version = newVersion
	return nil
}
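
// A hypothetical sketch (not part of the vendored file), assuming it lives in
// this configuration package with fmt and gopkg.in/yaml.v2 imported: a
// well-formed version string round-trips, while a malformed one is rejected by
// the validation above.
func ExampleVersion_unmarshal() {
	var v Version
	err := yaml.Unmarshal([]byte(`"0.1"`), &v)
	fmt.Println(v, err)
	err = yaml.Unmarshal([]byte(`"x.y"`), &v)
	fmt.Println(err != nil)
	// Output:
	// 0.1 <nil>
	// true
}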

// CurrentVersion is the most recent Version that can be parsed
var CurrentVersion = MajorMinorVersion(0, 1)

// Loglevel is the level at which operations are logged
// This can be error, warn, info, or debug
type Loglevel string

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
// valid loglevel
func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var loglevelString string
	err := unmarshal(&loglevelString)
	if err != nil {
		return err
	}

	loglevelString = strings.ToLower(loglevelString)
	switch loglevelString {
	case "error", "warn", "info", "debug":
	default:
		return fmt.Errorf("Invalid loglevel %s, must be one of [error, warn, info, debug]", loglevelString)
	}

	*loglevel = Loglevel(loglevelString)
	return nil
}

// Parameters defines a key-value parameters mapping
type Parameters map[string]interface{}

// Storage defines the configuration for registry object storage
type Storage map[string]Parameters

// Type returns the storage driver type, such as filesystem or s3
func (storage Storage) Type() string {
	var storageType []string

	// Return the only non-auxiliary key in this map
	for k := range storage {
		switch k {
		case "maintenance":
			// allow configuration of maintenance
		case "cache":
			// allow configuration of caching
		case "delete":
			// allow configuration of delete
		case "redirect":
			// allow configuration of redirect
		default:
			storageType = append(storageType, k)
		}
	}
	if len(storageType) > 1 {
		panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", "))
	}
	if len(storageType) == 1 {
		return storageType[0]
	}
	return ""
}

// Parameters returns the Parameters map for a Storage configuration
func (storage Storage) Parameters() Parameters {
	return storage[storage.Type()]
}

// setParameter changes the parameter at the provided key to the new value
func (storage Storage) setParameter(key string, value interface{}) {
	storage[storage.Type()][key] = value
}

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var storageMap map[string]Parameters
	err := unmarshal(&storageMap)
	if err == nil {
		if len(storageMap) > 1 {
			types := make([]string, 0, len(storageMap))
			for k := range storageMap {
				switch k {
				case "maintenance":
					// allow for configuration of maintenance
				case "cache":
					// allow configuration of caching
				case "delete":
					// allow configuration of delete
				case "redirect":
					// allow configuration of redirect
				default:
					types = append(types, k)
				}
			}

			if len(types) > 1 {
				return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types)
			}
		}
		*storage = storageMap
		return nil
	}

	var storageType string
	err = unmarshal(&storageType)
	if err == nil {
		*storage = Storage{storageType: Parameters{}}
		return nil
	}

	return err
}

// MarshalYAML implements the yaml.Marshaler interface
func (storage Storage) MarshalYAML() (interface{}, error) {
	if storage.Parameters() == nil {
		return storage.Type(), nil
	}
	return map[string]Parameters(storage), nil
}
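
// A hypothetical sketch (not part of the vendored file), assuming it lives in
// this configuration package with fmt and gopkg.in/yaml.v2 imported: the
// unmarshaler above accepts either a single-item map or a bare string, the
// latter yielding a driver with empty Parameters.
func ExampleStorage_unmarshal() {
	var withParams, bare Storage
	_ = yaml.Unmarshal([]byte("inmemory: {param1: value1}"), &withParams)
	_ = yaml.Unmarshal([]byte("inmemory"), &bare)
	fmt.Println(withParams.Type(), withParams.Parameters()["param1"])
	fmt.Println(bare.Type(), len(bare.Parameters()))
	// Output:
	// inmemory value1
	// inmemory 0
}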

// Auth defines the configuration for registry authorization.
type Auth map[string]Parameters

// Type returns the auth type, such as htpasswd or token
func (auth Auth) Type() string {
	// Return the only key in this map
	for k := range auth {
		return k
	}
	return ""
}

// Parameters returns the Parameters map for an Auth configuration
func (auth Auth) Parameters() Parameters {
	return auth[auth.Type()]
}

// setParameter changes the parameter at the provided key to the new value
func (auth Auth) setParameter(key string, value interface{}) {
	auth[auth.Type()][key] = value
}

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var m map[string]Parameters
	err := unmarshal(&m)
	if err == nil {
		if len(m) > 1 {
			types := make([]string, 0, len(m))
			for k := range m {
				types = append(types, k)
			}

			// TODO(stevvooe): May want to change this slightly for
			// authorization to allow multiple challenges.
			return fmt.Errorf("must provide exactly one type. Provided: %v", types)

		}
		*auth = m
		return nil
	}

	var authType string
	err = unmarshal(&authType)
	if err == nil {
		*auth = Auth{authType: Parameters{}}
		return nil
	}

	return err
}

// MarshalYAML implements the yaml.Marshaler interface
func (auth Auth) MarshalYAML() (interface{}, error) {
	if auth.Parameters() == nil {
		return auth.Type(), nil
	}
	return map[string]Parameters(auth), nil
}

// Notifications configures multiple http endpoints.
type Notifications struct {
	// Endpoints is a list of http configurations for endpoints that
	// respond to webhook notifications. In the future, we may allow other
	// kinds of endpoints, such as external queues.
	Endpoints []Endpoint `yaml:"endpoints,omitempty"`
}

// Endpoint describes the configuration of an http webhook notification
// endpoint.
type Endpoint struct {
	Name              string        `yaml:"name"`              // identifies the endpoint in the registry instance.
	Disabled          bool          `yaml:"disabled"`          // disables the endpoint
	URL               string        `yaml:"url"`               // post url for the endpoint.
	Headers           http.Header   `yaml:"headers"`           // static headers that should be added to all requests
	Timeout           time.Duration `yaml:"timeout"`           // HTTP timeout
	Threshold         int           `yaml:"threshold"`         // circuit breaker threshold before backing off on failure
	Backoff           time.Duration `yaml:"backoff"`           // backoff duration
	IgnoredMediaTypes []string      `yaml:"ignoredmediatypes"` // target media types to ignore
}

// Reporting defines error reporting methods.
type Reporting struct {
	// Bugsnag configures error reporting for Bugsnag (bugsnag.com).
	Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"`
	// NewRelic configures error reporting for NewRelic (newrelic.com)
	NewRelic NewRelicReporting `yaml:"newrelic,omitempty"`
}

// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com).
type BugsnagReporting struct {
	// APIKey is the Bugsnag api key.
	APIKey string `yaml:"apikey,omitempty"`
	// ReleaseStage tracks where the registry is deployed.
	// Examples: production, staging, development
	ReleaseStage string `yaml:"releasestage,omitempty"`
	// Endpoint is used for specifying an enterprise Bugsnag endpoint.
	Endpoint string `yaml:"endpoint,omitempty"`
}

// NewRelicReporting configures error reporting for NewRelic (newrelic.com)
type NewRelicReporting struct {
	// LicenseKey is the NewRelic user license key
	LicenseKey string `yaml:"licensekey,omitempty"`
	// Name is the component name of the registry in NewRelic
	Name string `yaml:"name,omitempty"`
	// Verbose configures debug output to STDOUT
	Verbose bool `yaml:"verbose,omitempty"`
}

// Middleware configures named middlewares to be applied at injection points.
type Middleware struct {
	// Name the middleware registers itself as
	Name string `yaml:"name"`
	// Flag to disable middleware easily
	Disabled bool `yaml:"disabled,omitempty"`
	// Map of parameters that will be passed to the middleware's initialization function
	Options Parameters `yaml:"options"`
}

// Proxy configures the registry as a pull through cache
type Proxy struct {
	// RemoteURL is the URL of the remote registry
	RemoteURL string `yaml:"remoteurl"`

	// Username of the hub user
	Username string `yaml:"username"`

	// Password of the hub user
	Password string `yaml:"password"`
}

// Parse parses an input configuration yaml document into a Configuration struct
// This should generally be capable of handling old configuration format versions
//
// Environment variables may be used to override configuration parameters other than version,
// following the scheme below:
// Configuration.Abc may be replaced by the value of REGISTRY_ABC,
// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth
func Parse(rd io.Reader) (*Configuration, error) {
	in, err := ioutil.ReadAll(rd)
	if err != nil {
		return nil, err
	}

	p := NewParser("registry", []VersionedParseInfo{
		{
			Version: MajorMinorVersion(0, 1),
			ParseAs: reflect.TypeOf(v0_1Configuration{}),
			ConversionFunc: func(c interface{}) (interface{}, error) {
				if v0_1, ok := c.(*v0_1Configuration); ok {
					if v0_1.Loglevel == Loglevel("") {
						v0_1.Loglevel = Loglevel("info")
					}
					if v0_1.Storage.Type() == "" {
						return nil, errors.New("No storage configuration provided")
					}
					return (*Configuration)(v0_1), nil
				}
				return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c)
			},
		},
	})

	config := new(Configuration)
	err = p.Parse(in, config)
	if err != nil {
		return nil, err
	}

	return config, nil
}
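
// A hypothetical usage sketch (not part of the vendored file), assuming it
// lives in this configuration package with os and strings imported: an
// environment variable overrides the storage driver named in the yaml
// document, per the REGISTRY_* scheme documented above.
func ExampleParse_envOverride() {
	os.Setenv("REGISTRY_STORAGE", "inmemory")
	config, err := Parse(strings.NewReader("version: 0.1\nstorage: filesystem"))
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(config.Storage.Type())
	// Output: inmemory
}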
529
vendor/github.com/docker/distribution/configuration/configuration_test.go
generated
vendored
Normal file
@@ -0,0 +1,529 @@

package configuration

import (
	"bytes"
	"net/http"
	"os"
	"reflect"
	"strings"
	"testing"

	. "gopkg.in/check.v1"
	"gopkg.in/yaml.v2"
)

// Hook up gocheck into the "go test" runner
func Test(t *testing.T) { TestingT(t) }

// configStruct is a canonical example configuration, which should map to configYamlV0_1
var configStruct = Configuration{
	Version: "0.1",
	Log: struct {
		AccessLog struct {
			Disabled bool `yaml:"disabled,omitempty"`
		} `yaml:"accesslog,omitempty"`
		Level     Loglevel               `yaml:"level"`
		Formatter string                 `yaml:"formatter,omitempty"`
		Fields    map[string]interface{} `yaml:"fields,omitempty"`
		Hooks     []LogHook              `yaml:"hooks,omitempty"`
	}{
		Fields: map[string]interface{}{"environment": "test"},
	},
	Loglevel: "info",
	Storage: Storage{
		"s3": Parameters{
			"region":        "us-east-1",
			"bucket":        "my-bucket",
			"rootdirectory": "/registry",
			"encrypt":       true,
			"secure":        false,
			"accesskey":     "SAMPLEACCESSKEY",
			"secretkey":     "SUPERSECRET",
			"host":          nil,
			"port":          42,
		},
	},
	Auth: Auth{
		"silly": Parameters{
			"realm":   "silly",
			"service": "silly",
		},
	},
	Reporting: Reporting{
		Bugsnag: BugsnagReporting{
			APIKey: "BugsnagApiKey",
		},
	},
	Notifications: Notifications{
		Endpoints: []Endpoint{
			{
				Name: "endpoint-1",
				URL:  "http://example.com",
				Headers: http.Header{
					"Authorization": []string{"Bearer <example>"},
				},
				IgnoredMediaTypes: []string{"application/octet-stream"},
			},
		},
	},
	HTTP: struct {
		Addr         string `yaml:"addr,omitempty"`
		Net          string `yaml:"net,omitempty"`
		Host         string `yaml:"host,omitempty"`
		Prefix       string `yaml:"prefix,omitempty"`
		Secret       string `yaml:"secret,omitempty"`
		RelativeURLs bool   `yaml:"relativeurls,omitempty"`
		TLS          struct {
			Certificate string   `yaml:"certificate,omitempty"`
			Key         string   `yaml:"key,omitempty"`
			ClientCAs   []string `yaml:"clientcas,omitempty"`
			LetsEncrypt struct {
				CacheFile string `yaml:"cachefile,omitempty"`
				Email     string `yaml:"email,omitempty"`
			} `yaml:"letsencrypt,omitempty"`
		} `yaml:"tls,omitempty"`
		Headers http.Header `yaml:"headers,omitempty"`
		Debug   struct {
			Addr string `yaml:"addr,omitempty"`
		} `yaml:"debug,omitempty"`
		HTTP2 struct {
			Disabled bool `yaml:"disabled,omitempty"`
		} `yaml:"http2,omitempty"`
	}{
		TLS: struct {
			Certificate string   `yaml:"certificate,omitempty"`
			Key         string   `yaml:"key,omitempty"`
			ClientCAs   []string `yaml:"clientcas,omitempty"`
			LetsEncrypt struct {
				CacheFile string `yaml:"cachefile,omitempty"`
				Email     string `yaml:"email,omitempty"`
			} `yaml:"letsencrypt,omitempty"`
		}{
			ClientCAs: []string{"/path/to/ca.pem"},
		},
		Headers: http.Header{
			"X-Content-Type-Options": []string{"nosniff"},
		},
		HTTP2: struct {
			Disabled bool `yaml:"disabled,omitempty"`
		}{
			Disabled: false,
		},
	},
}

// configYamlV0_1 is a Version 0.1 yaml document representing configStruct
var configYamlV0_1 = `
version: 0.1
log:
  fields:
    environment: test
loglevel: info
storage:
  s3:
    region: us-east-1
    bucket: my-bucket
    rootdirectory: /registry
    encrypt: true
    secure: false
    accesskey: SAMPLEACCESSKEY
    secretkey: SUPERSECRET
    host: ~
    port: 42
auth:
  silly:
    realm: silly
    service: silly
notifications:
  endpoints:
    - name: endpoint-1
      url: http://example.com
      headers:
        Authorization: [Bearer <example>]
      ignoredmediatypes:
        - application/octet-stream
reporting:
  bugsnag:
    apikey: BugsnagApiKey
http:
  clientcas:
    - /path/to/ca.pem
  headers:
    X-Content-Type-Options: [nosniff]
`

// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory
// storage driver with no parameters
var inmemoryConfigYamlV0_1 = `
version: 0.1
loglevel: info
storage: inmemory
auth:
  silly:
    realm: silly
    service: silly
notifications:
  endpoints:
    - name: endpoint-1
      url: http://example.com
      headers:
        Authorization: [Bearer <example>]
      ignoredmediatypes:
        - application/octet-stream
http:
  headers:
    X-Content-Type-Options: [nosniff]
`

type ConfigSuite struct {
	expectedConfig *Configuration
}

var _ = Suite(new(ConfigSuite))

func (suite *ConfigSuite) SetUpTest(c *C) {
	os.Clearenv()
	suite.expectedConfig = copyConfig(configStruct)
}

// TestMarshalRoundtrip validates that configStruct can be marshaled and
// unmarshaled without changing any parameters
func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) {
	configBytes, err := yaml.Marshal(suite.expectedConfig)
	c.Assert(err, IsNil)
	config, err := Parse(bytes.NewReader(configBytes))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseSimple validates that configYamlV0_1 can be parsed into a struct
// matching configStruct
func (suite *ConfigSuite) TestParseSimple(c *C) {
	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseInmemory validates that configuration yaml with storage provided as
// a string can be parsed into a Configuration struct with no storage parameters
func (suite *ConfigSuite) TestParseInmemory(c *C) {
	suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
	suite.expectedConfig.Reporting = Reporting{}
	suite.expectedConfig.Log.Fields = nil

	config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseIncomplete validates that an incomplete yaml configuration cannot
// be parsed without providing environment variables to fill in the missing
// components.
func (suite *ConfigSuite) TestParseIncomplete(c *C) {
	incompleteConfigYaml := "version: 0.1"
	_, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
	c.Assert(err, NotNil)

	suite.expectedConfig.Log.Fields = nil
	suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}}
	suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}}
	suite.expectedConfig.Reporting = Reporting{}
	suite.expectedConfig.Notifications = Notifications{}
	suite.expectedConfig.HTTP.Headers = nil

	// Note: this also tests that REGISTRY_STORAGE and
	// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together
	os.Setenv("REGISTRY_STORAGE", "filesystem")
	os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
	os.Setenv("REGISTRY_AUTH", "silly")
	os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly")

	config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithSameEnvStorage validates that providing environment variables
// that match the given storage type will only include environment-defined
// parameters and remove yaml-defined parameters
func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) {
	suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}}

	os.Setenv("REGISTRY_STORAGE", "s3")
	os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change
// and add to the given storage parameters will change and add parameters to the parsed
// Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) {
	suite.expectedConfig.Storage.setParameter("region", "us-west-1")
	suite.expectedConfig.Storage.setParameter("secure", true)
	suite.expectedConfig.Storage.setParameter("newparam", "some Value")

	os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1")
	os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true")
	os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithDifferentEnvStorageType validates that providing an environment variable that
// changes the storage type will be reflected in the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) {
	suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}

	os.Setenv("REGISTRY_STORAGE", "inmemory")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable
// that changes the storage type will be reflected in the parsed Configuration struct and that
// environment storage parameters will also be included
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) {
	suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}}
	suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot")

	os.Setenv("REGISTRY_STORAGE", "filesystem")
	os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log
// level to the same as the one provided in the yaml will not change the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) {
	os.Setenv("REGISTRY_LOGLEVEL", "info")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the
// log level will override the value provided in the yaml document
func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) {
	suite.expectedConfig.Loglevel = "error"

	os.Setenv("REGISTRY_LOGLEVEL", "error")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseInvalidLoglevel validates that the parser will fail to parse a
// configuration if the loglevel is malformed
func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) {
	invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory"
	_, err := Parse(bytes.NewReader([]byte(invalidConfigYaml)))
	c.Assert(err, NotNil)

	os.Setenv("REGISTRY_LOGLEVEL", "derp")

	_, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, NotNil)

}

// TestParseWithDifferentEnvReporting validates that environment variables
// properly override reporting parameters
func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) {
	suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey"
	suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080"
	suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey"
	suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME"

	os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey")
	os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080")
	os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey")
	os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration
// version than the CurrentVersion
func (suite *ConfigSuite) TestParseInvalidVersion(c *C) {
	suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1)
	configBytes, err := yaml.Marshal(suite.expectedConfig)
	c.Assert(err, IsNil)
	_, err = Parse(bytes.NewReader(configBytes))
	c.Assert(err, NotNil)
}

// TestParseExtraneousVars validates that environment variables referring to
// nonexistent variables don't cause side effects.
func (suite *ConfigSuite) TestParseExtraneousVars(c *C) {
	suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080"

	// A valid environment variable
	os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080")

	// Environment variables which shouldn't set config items
	os.Setenv("registry_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey")
	os.Setenv("REPORTING_NEWRELIC_NAME", "some NewRelic NAME")
	os.Setenv("REGISTRY_DUCKS", "quack")
	os.Setenv("REGISTRY_REPORTING_ASDF", "ghjk")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseEnvVarImplicitMaps validates that environment variables can set
// values in maps that don't already exist.
func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *C) {
	readonly := make(map[string]interface{})
	readonly["enabled"] = true

	maintenance := make(map[string]interface{})
	maintenance["readonly"] = readonly

	suite.expectedConfig.Storage["maintenance"] = maintenance

	os.Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
	c.Assert(config, DeepEquals, suite.expectedConfig)
}

// TestParseEnvWrongTypeMap validates that incorrectly attempting to unmarshal a
// string over existing map fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeMap(c *C) {
	os.Setenv("REGISTRY_STORAGE_S3", "somestring")

	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, NotNil)
}

// TestParseEnvWrongTypeStruct validates that incorrectly attempting to
// unmarshal a string into a struct fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeStruct(c *C) {
	os.Setenv("REGISTRY_STORAGE_LOG", "somestring")

	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, NotNil)
}

// TestParseEnvWrongTypeSlice validates that incorrectly attempting to
// unmarshal a string into a slice fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeSlice(c *C) {
	os.Setenv("REGISTRY_LOG_HOOKS", "somestring")

	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, NotNil)
}

// TestParseEnvMany tests several environment variable overrides.
// The result is not checked - the goal of this test is to detect panics
// from misuse of reflection.
func (suite *ConfigSuite) TestParseEnvMany(c *C) {
	os.Setenv("REGISTRY_VERSION", "0.1")
	os.Setenv("REGISTRY_LOG_LEVEL", "debug")
	os.Setenv("REGISTRY_LOG_FORMATTER", "json")
	os.Setenv("REGISTRY_LOG_HOOKS", "json")
	os.Setenv("REGISTRY_LOG_FIELDS", "abc: xyz")
	os.Setenv("REGISTRY_LOG_HOOKS", "- type: asdf")
	os.Setenv("REGISTRY_LOGLEVEL", "debug")
	os.Setenv("REGISTRY_STORAGE", "s3")
	os.Setenv("REGISTRY_AUTH_PARAMS", "param1: value1")
	os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")
	os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")

	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, IsNil)
}

func checkStructs(c *C, t reflect.Type, structsChecked map[string]struct{}) {
	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Map || t.Kind() == reflect.Slice {
		t = t.Elem()
	}

	if t.Kind() != reflect.Struct {
		return
	}
	if _, present := structsChecked[t.String()]; present {
		// Already checked this type
		return
	}

	structsChecked[t.String()] = struct{}{}

	byUpperCase := make(map[string]int)
	for i := 0; i < t.NumField(); i++ {
		sf := t.Field(i)

		// Check that the yaml tag does not contain an _.
		yamlTag := sf.Tag.Get("yaml")
		if strings.Contains(yamlTag, "_") {
			c.Fatalf("yaml field name includes _ character: %s", yamlTag)
		}
		upper := strings.ToUpper(sf.Name)
		if _, present := byUpperCase[upper]; present {
			c.Fatalf("field name collision in configuration object: %s", sf.Name)
		}
		byUpperCase[upper] = i

		checkStructs(c, sf.Type, structsChecked)
	}
}

// TestValidateConfigStruct makes sure that the config struct has no members
// with yaml tags that would be ambiguous to the environment variable parser.
func (suite *ConfigSuite) TestValidateConfigStruct(c *C) {
	structsChecked := make(map[string]struct{})
	checkStructs(c, reflect.TypeOf(Configuration{}), structsChecked)
}

func copyConfig(config Configuration) *Configuration {
	configCopy := new(Configuration)

	configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor())
	configCopy.Loglevel = config.Loglevel
	configCopy.Log = config.Log
	configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields))
	for k, v := range config.Log.Fields {
		configCopy.Log.Fields[k] = v
	}

	configCopy.Storage = Storage{config.Storage.Type(): Parameters{}}
	for k, v := range config.Storage.Parameters() {
		configCopy.Storage.setParameter(k, v)
	}
	configCopy.Reporting = Reporting{
		Bugsnag:  BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint},
		NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose},
	}

	configCopy.Auth = Auth{config.Auth.Type(): Parameters{}}
	for k, v := range config.Auth.Parameters() {
		configCopy.Auth.setParameter(k, v)
	}

	configCopy.Notifications = Notifications{Endpoints: []Endpoint{}}
	for _, v := range config.Notifications.Endpoints {
		configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v)
	}

	configCopy.HTTP.Headers = make(http.Header)
	for k, v := range config.HTTP.Headers {
		configCopy.HTTP.Headers[k] = v
	}

	return configCopy
}
283
vendor/github.com/docker/distribution/configuration/parser.go
generated
vendored
Normal file
@@ -0,0 +1,283 @@

package configuration

import (
	"fmt"
	"os"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"
)

// Version is a major/minor version pair of the form Major.Minor
// Major version upgrades indicate structure or type changes
// Minor version upgrades should be strictly additive
type Version string

// MajorMinorVersion constructs a Version from its Major and Minor components
func MajorMinorVersion(major, minor uint) Version {
	return Version(fmt.Sprintf("%d.%d", major, minor))
}

func (version Version) major() (uint, error) {
	majorPart := strings.Split(string(version), ".")[0]
	major, err := strconv.ParseUint(majorPart, 10, 0)
	return uint(major), err
}

// Major returns the major version portion of a Version
func (version Version) Major() uint {
	major, _ := version.major()
	return major
}

func (version Version) minor() (uint, error) {
	minorPart := strings.Split(string(version), ".")[1]
	minor, err := strconv.ParseUint(minorPart, 10, 0)
	return uint(minor), err
}

// Minor returns the minor version portion of a Version
func (version Version) Minor() uint {
	minor, _ := version.minor()
	return minor
}

// VersionedParseInfo defines how a specific version of a configuration should
// be parsed into the current version
type VersionedParseInfo struct {
	// Version is the version which this parsing information relates to
	Version Version
	// ParseAs defines the type which a configuration file of this version
	// should be parsed into
	ParseAs reflect.Type
	// ConversionFunc defines a method for converting the parsed configuration
	// (of type ParseAs) into the current configuration version
	// Note: this method signature is very unclear with the absence of generics
	ConversionFunc func(interface{}) (interface{}, error)
}

type envVar struct {
	name  string
	value string
}

type envVars []envVar

func (a envVars) Len() int           { return len(a) }
func (a envVars) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name }

// Parser can be used to parse a configuration file and environment of a defined
// version into a unified output structure
type Parser struct {
	prefix  string
	mapping map[Version]VersionedParseInfo
	env     envVars
}

// NewParser returns a *Parser with the given environment prefix which handles
// versioned configurations which match the given parseInfos
func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser {
	p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)}

	for _, parseInfo := range parseInfos {
		p.mapping[parseInfo.Version] = parseInfo
	}

	for _, env := range os.Environ() {
		envParts := strings.SplitN(env, "=", 2)
		p.env = append(p.env, envVar{envParts[0], envParts[1]})
	}

	// We must sort the environment variables lexically by name so that
	// less specific variables are applied before more specific ones
	// (i.e. REGISTRY_STORAGE before
	// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a
	// lot simpler and easier to get right than unmarshalling map entries
	// into temporaries and merging with the existing entry.
	sort.Sort(p.env)

	return &p
}
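
// A hypothetical sketch (not part of the vendored file), assuming it lives in
// this configuration package: the lexical sort above guarantees that the bare
// REGISTRY_STORAGE variable sorts ahead of its more specific siblings, so the
// storage type is set before its parameters are overwritten.
func ExampleEnvVarOrdering() {
	vars := envVars{
		{"REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot"},
		{"REGISTRY_STORAGE", "filesystem"},
	}
	sort.Sort(vars)
	for _, v := range vars {
		fmt.Println(v.name)
	}
	// Output:
	// REGISTRY_STORAGE
	// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
}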

// Parse reads in the given []byte and environment and writes the resulting
// configuration into the input v
//
// Environment variables may be used to override configuration parameters other
// than version, following the scheme below:
// v.Abc may be replaced by the value of PREFIX_ABC,
// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth
func (p *Parser) Parse(in []byte, v interface{}) error {
	var versionedStruct struct {
		Version Version
	}

	if err := yaml.Unmarshal(in, &versionedStruct); err != nil {
		return err
	}

	parseInfo, ok := p.mapping[versionedStruct.Version]
	if !ok {
		return fmt.Errorf("Unsupported version: %q", versionedStruct.Version)
	}

	parseAs := reflect.New(parseInfo.ParseAs)
	err := yaml.Unmarshal(in, parseAs.Interface())
	if err != nil {
		return err
	}

	for _, envVar := range p.env {
		pathStr := envVar.name
		if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") {
			path := strings.Split(pathStr, "_")

			err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value)
			if err != nil {
				return err
			}
		}
	}

	c, err := parseInfo.ConversionFunc(parseAs.Interface())
	if err != nil {
		return err
	}
	reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c)))
	return nil
}

// overwriteFields replaces configuration values with alternate values specified
// through the environment. Precondition: an empty path slice must never be
// passed in.
func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error {
	for v.Kind() == reflect.Ptr {
		if v.IsNil() {
			panic("encountered nil pointer while handling environment variable " + fullpath)
		}
		v = reflect.Indirect(v)
	}
	switch v.Kind() {
	case reflect.Struct:
		return p.overwriteStruct(v, fullpath, path, payload)
	case reflect.Map:
		return p.overwriteMap(v, fullpath, path, payload)
	case reflect.Interface:
		if v.NumMethod() == 0 {
			if !v.IsNil() {
				return p.overwriteFields(v.Elem(), fullpath, path, payload)
			}
			// Interface was empty; create an implicit map
			var template map[string]interface{}
			wrappedV := reflect.MakeMap(reflect.TypeOf(template))
			v.Set(wrappedV)
			return p.overwriteMap(wrappedV, fullpath, path, payload)
		}
	}
	return nil
}

func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error {
	// Generate case-insensitive map of struct fields
	byUpperCase := make(map[string]int)
	for i := 0; i < v.NumField(); i++ {
		sf := v.Type().Field(i)
		upper := strings.ToUpper(sf.Name)
		if _, present := byUpperCase[upper]; present {
			panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name))
		}
		byUpperCase[upper] = i
	}

	fieldIndex, present := byUpperCase[path[0]]
	if !present {
		logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath)
		return nil
	}
	field := v.Field(fieldIndex)
	sf := v.Type().Field(fieldIndex)

	if len(path) == 1 {
		// Env var specifies this field directly
		fieldVal := reflect.New(sf.Type)
		err := yaml.Unmarshal([]byte(payload), fieldVal.Interface())
		if err != nil {
			return err
		}
		field.Set(reflect.Indirect(fieldVal))
		return nil
	}

	// If the field is nil, must create an object
	switch sf.Type.Kind() {
	case reflect.Map:
		if field.IsNil() {
			field.Set(reflect.MakeMap(sf.Type))
		}
	case reflect.Ptr:
		if field.IsNil() {
			field.Set(reflect.New(sf.Type))
		}
	}

	err := p.overwriteFields(field, fullpath, path[1:], payload)
	if err != nil {
		return err
	}

	return nil
}

func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error {
	if m.Type().Key().Kind() != reflect.String {
		// non-string keys unsupported
		logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath)
		return nil
	}

	if len(path) > 1 {
		// If a matching key exists, get its value and continue the
		// overwriting process.
		for _, k := range m.MapKeys() {
			if strings.ToUpper(k.String()) == path[0] {
				mapValue := m.MapIndex(k)
				// If the existing value is nil, we want to
				// recreate it instead of using this value.
				if (mapValue.Kind() == reflect.Ptr ||
					mapValue.Kind() == reflect.Interface ||
					mapValue.Kind() == reflect.Map) &&
					mapValue.IsNil() {
					break
				}
				return p.overwriteFields(mapValue, fullpath, path[1:], payload)
			}
		}
	}

	// (Re)create this key
	var mapValue reflect.Value
	if m.Type().Elem().Kind() == reflect.Map {
		mapValue = reflect.MakeMap(m.Type().Elem())
	} else {
		mapValue = reflect.New(m.Type().Elem())
	}
	if len(path) > 1 {
		err := p.overwriteFields(mapValue, fullpath, path[1:], payload)
		if err != nil {
			return err
		}
	} else {
		err := yaml.Unmarshal([]byte(payload), mapValue.Interface())
		if err != nil {
			return err
		}
	}

	m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue))

	return nil
}
85
vendor/github.com/docker/distribution/context/context.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@

package context

import (
	"sync"

	"github.com/docker/distribution/uuid"
	"golang.org/x/net/context"
)

// Context is a copy of Context from the golang.org/x/net/context package.
type Context interface {
	context.Context
}

// instanceContext is a context that provides only an instance id. It is
// provided as the main background context.
type instanceContext struct {
	Context
	id   string    // id of context, logged as "instance.id"
	once sync.Once // once protects generation of the id
}

func (ic *instanceContext) Value(key interface{}) interface{} {
	if key == "instance.id" {
		ic.once.Do(func() {
			// We want to lazy initialize the UUID such that we don't
			// call a random generator from the package initialization
			// code. For various reasons random may not be available
			// https://github.com/docker/distribution/issues/782
			ic.id = uuid.Generate().String()
		})
		return ic.id
	}

	return ic.Context.Value(key)
}

var background = &instanceContext{
	Context: context.Background(),
}

// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() Context {
	return background
}

// WithValue returns a copy of parent in which the value associated with key is
// val. Use context Values only for request-scoped data that transits processes
// and APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key, val interface{}) Context {
	return context.WithValue(parent, key, val)
}

// stringMapContext is a simple context implementation that checks a map for a
// key, falling back to a parent if not present.
type stringMapContext struct {
	context.Context
	m map[string]interface{}
}

// WithValues returns a context that proxies lookups through a map. Only
// supports string keys.
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
	mo := make(map[string]interface{}, len(m)) // make our own copy.
	for k, v := range m {
		mo[k] = v
	}

	return stringMapContext{
		Context: ctx,
		m:       mo,
	}
}

func (smc stringMapContext) Value(key interface{}) interface{} {
	if ks, ok := key.(string); ok {
		if v, ok := smc.m[ks]; ok {
			return v
		}
	}

	return smc.Context.Value(key)
}
89
vendor/github.com/docker/distribution/context/doc.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@

// Package context provides several utilities for working with
// golang.org/x/net/context in http requests. Primarily, the focus is on
// logging relevant request information but this package is not limited to
// that purpose.
//
// The easiest way to get started is to get the background context:
//
// 	ctx := context.Background()
//
// The returned context should be passed around your application and be the
// root of all other context instances. If the application has a version, this
// line should be called before anything else:
//
// 	ctx := context.WithVersion(context.Background(), version)
//
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
// 	GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
// 	ctx := context.WithValue(context.Background(), "version", version)
// 	GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
// 	INFO[0000] this log message has a version field        version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
// 	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
// 	GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
// 	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// 	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
// 	INFO[0000] this log message has a version field        http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context; the previous context, with the
// version field, can still be used independently. Put another way, the new
// logger, added to the request context, is unique to that context and can
// have request scoped variables.
//
// HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
// request in the context using WithRequest. This makes the request variables
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
// 	ctx = WithRequest(ctx, req)
// 	GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
// 	ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
// can be traced in log messages. Using the fields like "http.request.id", one
// can analyze call flow for a particular request with a simple grep of the
// logs.
package context
366
vendor/github.com/docker/distribution/context/http.go
generated
vendored
Normal file
@@ -0,0 +1,366 @@

package context

import (
	"errors"
	"net"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/docker/distribution/uuid"
	"github.com/gorilla/mux"
	log "github.com/sirupsen/logrus"
)

// Common errors used with this package.
var (
	ErrNoRequestContext        = errors.New("no http request in context")
	ErrNoResponseWriterContext = errors.New("no http response in context")
)

func parseIP(ipStr string) net.IP {
	ip := net.ParseIP(ipStr)
	if ip == nil {
		log.Warnf("invalid remote IP address: %q", ipStr)
	}
	return ip
}

// RemoteAddr extracts the remote address of the request, taking into
// account proxy headers.
func RemoteAddr(r *http.Request) string {
	if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
		proxies := strings.Split(prior, ",")
		if len(proxies) > 0 {
			remoteAddr := strings.Trim(proxies[0], " ")
			if parseIP(remoteAddr) != nil {
				return remoteAddr
			}
		}
	}
	// X-Real-Ip is less supported, but worth checking in the
	// absence of X-Forwarded-For
	if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
		if parseIP(realIP) != nil {
			return realIP
		}
	}

	return r.RemoteAddr
}

// RemoteIP extracts the remote IP of the request, taking into
// account proxy headers.
func RemoteIP(r *http.Request) string {
	addr := RemoteAddr(r)

	// Try parsing it as "IP:port"
	if ip, _, err := net.SplitHostPort(addr); err == nil {
		return ip
	}

	return addr
}
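
// A hypothetical sketch (not part of the vendored file), assuming it lives in
// this context package with fmt imported: the first X-Forwarded-For entry
// takes precedence over the socket address.
func ExampleRemoteAddr() {
	r, _ := http.NewRequest("GET", "http://registry.example.com/v2/", nil)
	r.RemoteAddr = "10.0.0.1:4000"
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.2")
	fmt.Println(RemoteAddr(r), RemoteIP(r))
	// Output: 203.0.113.7 203.0.113.7
}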
// WithRequest places the request on the context. The context of the request
|
||||
// is assigned a unique id, available at "http.request.id". The request itself
|
||||
// is available at "http.request". Other common attributes are available under
|
||||
// the prefix "http.request.". If a request is already present on the context,
|
||||
// this method will panic.
|
||||
func WithRequest(ctx Context, r *http.Request) Context {
|
||||
if ctx.Value("http.request") != nil {
|
||||
// NOTE(stevvooe): This needs to be considered a programming error. It
|
||||
// is unlikely that we'd want to have more than one request in
|
||||
// context.
|
||||
panic("only one request per context")
|
||||
}
|
||||
|
||||
return &httpRequestContext{
|
||||
Context: ctx,
|
||||
startedAt: time.Now(),
|
||||
id: uuid.Generate().String(),
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
// GetRequest returns the http request in the given context. Returns
|
||||
// ErrNoRequestContext if the context does not have an http request associated
|
||||
// with it.
|
||||
func GetRequest(ctx Context) (*http.Request, error) {
|
||||
if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
|
||||
return r, nil
|
||||
}
|
||||
return nil, ErrNoRequestContext
|
||||
}
|
||||
|
||||
// GetRequestID attempts to resolve the current request id, if possible. An
|
||||
// error is return if it is not available on the context.
|
||||
func GetRequestID(ctx Context) string {
|
||||
return GetStringValue(ctx, "http.request.id")
|
||||
}
|
||||
|
||||
// WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context.
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		irwCN := &instrumentedResponseWriterCN{
			instrumentedResponseWriter: instrumentedResponseWriter{
				ResponseWriter: w,
				Context:        ctx,
			},
			CloseNotifier: closeNotifier,
		}

		return irwCN, irwCN
	}

	irw := instrumentedResponseWriter{
		ResponseWriter: w,
		Context:        ctx,
	}
	return &irw, &irw
}

// GetResponseWriter returns the http.ResponseWriter from the provided
// context. If not present, ErrNoResponseWriterContext is returned. The
// returned instance provides instrumentation in the context.
func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
	v := ctx.Value("http.response")

	rw, ok := v.(http.ResponseWriter)
	if !ok || rw == nil {
		return nil, ErrNoResponseWriterContext
	}

	return rw, nil
}

// getVarsFromRequest lets us change the request vars implementation for
// testing and maybe future changes.
var getVarsFromRequest = mux.Vars

// WithVars extracts gorilla/mux vars and makes them available on the returned
// context. Variables are available at keys with the prefix "vars.". For
// example, if looking for the variable "name", it can be accessed as
// "vars.name". Implementations that are accessing values need not know that
// the underlying context is implemented with gorilla/mux vars.
func WithVars(ctx Context, r *http.Request) Context {
	return &muxVarsContext{
		Context: ctx,
		vars:    getVarsFromRequest(r),
	}
}

// GetRequestLogger returns a logger that contains fields from the request in
// the current context. If the request is not available in the context, no
// fields will display. Request loggers can safely be pushed onto the context.
func GetRequestLogger(ctx Context) Logger {
	return GetLogger(ctx,
		"http.request.id",
		"http.request.method",
		"http.request.host",
		"http.request.uri",
		"http.request.referer",
		"http.request.useragent",
		"http.request.remoteaddr",
		"http.request.contenttype")
}

// GetResponseLogger reads the current response stats and builds a logger.
// Because the values are read at call time, pushing a logger returned from
// this function on the context will lead to missing or invalid data. Only
// call this at the end of a request, after the response has been written.
func GetResponseLogger(ctx Context) Logger {
	l := getLogrusLogger(ctx,
		"http.response.written",
		"http.response.status",
		"http.response.contenttype")

	duration := Since(ctx, "http.request.startedat")

	if duration > 0 {
		l = l.WithField("http.response.duration", duration.String())
	}

	return l
}

// httpRequestContext makes information about a request available to context.
type httpRequestContext struct {
	Context

	startedAt time.Time
	id        string
	r         *http.Request
}

// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "http.request". For other components, access them
// as "http.request.<component>", for example "http.request.uri" for
// r.RequestURI.
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.request" {
			return ctx.r
		}

		if !strings.HasPrefix(keyStr, "http.request.") {
			goto fallback
		}

		parts := strings.Split(keyStr, ".")

		if len(parts) != 3 {
			goto fallback
		}

		switch parts[2] {
		case "uri":
			return ctx.r.RequestURI
		case "remoteaddr":
			return RemoteAddr(ctx.r)
		case "method":
			return ctx.r.Method
		case "host":
			return ctx.r.Host
		case "referer":
			referer := ctx.r.Referer()
			if referer != "" {
				return referer
			}
		case "useragent":
			return ctx.r.UserAgent()
		case "id":
			return ctx.id
		case "startedat":
			return ctx.startedAt
		case "contenttype":
			ct := ctx.r.Header.Get("Content-Type")
			if ct != "" {
				return ct
			}
		}
	}

fallback:
	return ctx.Context.Value(key)
}

type muxVarsContext struct {
	Context
	vars map[string]string
}

func (ctx *muxVarsContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "vars" {
			return ctx.vars
		}

		if strings.HasPrefix(keyStr, "vars.") {
			keyStr = strings.TrimPrefix(keyStr, "vars.")
		}

		if v, ok := ctx.vars[keyStr]; ok {
			return v
		}
	}

	return ctx.Context.Value(key)
}

// instrumentedResponseWriterCN provides response writer information in a
// context. It implements http.CloseNotifier so that users can detect
// early disconnects.
type instrumentedResponseWriterCN struct {
	instrumentedResponseWriter
	http.CloseNotifier
}

// instrumentedResponseWriter provides response writer information in a
// context. This variant is only used in the case where CloseNotifier is not
// implemented by the parent ResponseWriter.
type instrumentedResponseWriter struct {
	http.ResponseWriter
	Context

	mu      sync.Mutex
	status  int
	written int64
}

func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
	n, err = irw.ResponseWriter.Write(p)

	irw.mu.Lock()
	irw.written += int64(n)

	// Guess the likely status if not set.
	if irw.status == 0 {
		irw.status = http.StatusOK
	}

	irw.mu.Unlock()

	return
}

func (irw *instrumentedResponseWriter) WriteHeader(status int) {
	irw.ResponseWriter.WriteHeader(status)

	irw.mu.Lock()
	irw.status = status
	irw.mu.Unlock()
}

func (irw *instrumentedResponseWriter) Flush() {
	if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
		flusher.Flush()
	}
}

func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.response" {
			return irw
		}

		if !strings.HasPrefix(keyStr, "http.response.") {
			goto fallback
		}

		parts := strings.Split(keyStr, ".")

		if len(parts) != 3 {
			goto fallback
		}

		irw.mu.Lock()
		defer irw.mu.Unlock()

		switch parts[2] {
		case "written":
			return irw.written
		case "status":
			return irw.status
		case "contenttype":
			contentType := irw.Header().Get("Content-Type")
			if contentType != "" {
				return contentType
			}
		}
	}

fallback:
	return irw.Context.Value(key)
}

func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.response" {
			return irw
		}
	}

	return irw.instrumentedResponseWriter.Value(key)
}
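
The response side composes the same way. A short, hedged sketch (the handler
name and payload are illustrative assumptions, not taken from the vendored
code) of wiring request and response instrumentation together:

func instrumentedHandler(w http.ResponseWriter, r *http.Request) {
	ctx := WithRequest(Background(), r)
	ctx, w = WithResponseWriter(ctx, w)

	w.Write([]byte("hello")) // no explicit WriteHeader, so status is recorded as 200

	// Values are read live from the instrumented writer.
	GetResponseLogger(ctx).Infof("status=%v written=%v",
		ctx.Value("http.response.status"),  // http.StatusOK
		ctx.Value("http.response.written")) // int64(5)
}
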
285
vendor/github.com/docker/distribution/context/http_test.go
generated
vendored
Normal file
@@ -0,0 +1,285 @@
package context

import (
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
	"net/url"
	"reflect"
	"testing"
	"time"
)

func TestWithRequest(t *testing.T) {
	var req http.Request

	start := time.Now()
	req.Method = "GET"
	req.Host = "example.com"
	req.RequestURI = "/test-test"
	req.Header = make(http.Header)
	req.Header.Set("Referer", "foo.com/referer")
	req.Header.Set("User-Agent", "test/0.1")

	ctx := WithRequest(Background(), &req)
	for _, testcase := range []struct {
		key      string
		expected interface{}
	}{
		{
			key:      "http.request",
			expected: &req,
		},
		{
			key: "http.request.id",
		},
		{
			key:      "http.request.method",
			expected: req.Method,
		},
		{
			key:      "http.request.host",
			expected: req.Host,
		},
		{
			key:      "http.request.uri",
			expected: req.RequestURI,
		},
		{
			key:      "http.request.referer",
			expected: req.Referer(),
		},
		{
			key:      "http.request.useragent",
			expected: req.UserAgent(),
		},
		{
			key:      "http.request.remoteaddr",
			expected: req.RemoteAddr,
		},
		{
			key: "http.request.startedat",
		},
	} {
		v := ctx.Value(testcase.key)

		if v == nil {
			t.Fatalf("value not found for %q", testcase.key)
		}

		if testcase.expected != nil && v != testcase.expected {
			t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected)
		}

		// Key specific checks!
		switch testcase.key {
		case "http.request.id":
			if _, ok := v.(string); !ok {
				t.Fatalf("request id not a string: %v", v)
			}
		case "http.request.startedat":
			vt, ok := v.(time.Time)
			if !ok {
				t.Fatalf("value not a time: %v", v)
			}

			now := time.Now()
			if vt.After(now) {
				t.Fatalf("time generated too late: %v > %v", vt, now)
			}

			if vt.Before(start) {
				t.Fatalf("time generated too early: %v < %v", vt, start)
			}
		}
	}
}

type testResponseWriter struct {
	flushed bool
	status  int
	written int64
	header  http.Header
}

func (trw *testResponseWriter) Header() http.Header {
	if trw.header == nil {
		trw.header = make(http.Header)
	}

	return trw.header
}

func (trw *testResponseWriter) Write(p []byte) (n int, err error) {
	if trw.status == 0 {
		trw.status = http.StatusOK
	}

	n = len(p)
	trw.written += int64(n)
	return
}

func (trw *testResponseWriter) WriteHeader(status int) {
	trw.status = status
}

func (trw *testResponseWriter) Flush() {
	trw.flushed = true
}

func TestWithResponseWriter(t *testing.T) {
	trw := testResponseWriter{}
	ctx, rw := WithResponseWriter(Background(), &trw)

	if ctx.Value("http.response") != rw {
		t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw)
	}

	grw, err := GetResponseWriter(ctx)
	if err != nil {
		t.Fatalf("error getting response writer: %v", err)
	}

	if grw != rw {
		t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw)
	}

	if ctx.Value("http.response.status") != 0 {
		t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status"))
	}

	if n, err := rw.Write(make([]byte, 1024)); err != nil {
		t.Fatalf("unexpected error writing: %v", err)
	} else if n != 1024 {
		t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024)
	}

	if ctx.Value("http.response.status") != http.StatusOK {
		t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK)
	}

	if ctx.Value("http.response.written") != int64(1024) {
		t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024)
	}

	// Make sure flush propagates
	rw.(http.Flusher).Flush()

	if !trw.flushed {
		t.Fatalf("response writer not flushed")
	}

	// Write another status and make sure context is correct. This normally
	// wouldn't work except for in this contrived testcase.
	rw.WriteHeader(http.StatusBadRequest)

	if ctx.Value("http.response.status") != http.StatusBadRequest {
		t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest)
	}
}

func TestWithVars(t *testing.T) {
	var req http.Request
	vars := map[string]string{
		"foo": "asdf",
		"bar": "qwer",
	}

	getVarsFromRequest = func(r *http.Request) map[string]string {
		if r != &req {
			t.Fatalf("unexpected request: %v != %v", r, req)
		}

		return vars
	}

	ctx := WithVars(Background(), &req)
	for _, testcase := range []struct {
		key      string
		expected interface{}
	}{
		{
			key:      "vars",
			expected: vars,
		},
		{
			key:      "vars.foo",
			expected: "asdf",
		},
		{
			key:      "vars.bar",
			expected: "qwer",
		},
	} {
		v := ctx.Value(testcase.key)

		if !reflect.DeepEqual(v, testcase.expected) {
			t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected)
		}
	}
}

// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test
// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten
// at the transport layer to 127.0.0.1:<port> . However, as the X-Forwarded-For header
// just contains the IP address, it is different enough for testing.
func TestRemoteAddr(t *testing.T) {
	var expectedRemote string
	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()

		if r.RemoteAddr == expectedRemote {
			t.Errorf("Unexpected matching remote addresses")
		}

		actualRemote := RemoteAddr(r)
		if expectedRemote != actualRemote {
			t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote)
		}

		w.WriteHeader(200)
	}))

	defer backend.Close()
	backendURL, err := url.Parse(backend.URL)
	if err != nil {
		t.Fatal(err)
	}

	proxy := httputil.NewSingleHostReverseProxy(backendURL)
	frontend := httptest.NewServer(proxy)
	defer frontend.Close()

	// X-Forwarded-For set by proxy
	expectedRemote = "127.0.0.1"
	proxyReq, err := http.NewRequest("GET", frontend.URL, nil)
	if err != nil {
		t.Fatal(err)
	}

	_, err = http.DefaultClient.Do(proxyReq)
	if err != nil {
		t.Fatal(err)
	}

	// RemoteAddr in X-Real-Ip
	getReq, err := http.NewRequest("GET", backend.URL, nil)
	if err != nil {
		t.Fatal(err)
	}

	expectedRemote = "1.2.3.4"
	getReq.Header["X-Real-ip"] = []string{expectedRemote}
	_, err = http.DefaultClient.Do(getReq)
	if err != nil {
		t.Fatal(err)
	}

	// Valid X-Real-Ip and invalid X-Forwarded-For
	getReq.Header["X-forwarded-for"] = []string{"1.2.3"}
	_, err = http.DefaultClient.Do(getReq)
	if err != nil {
		t.Fatal(err)
	}
}
116
vendor/github.com/docker/distribution/context/logger.go
generated
vendored
Normal file
@@ -0,0 +1,116 @@
package context

import (
	"fmt"
	"runtime"

	"github.com/sirupsen/logrus"
)

// Logger provides a leveled-logging interface.
type Logger interface {
	// standard logger methods
	Print(args ...interface{})
	Printf(format string, args ...interface{})
	Println(args ...interface{})

	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatalln(args ...interface{})

	Panic(args ...interface{})
	Panicf(format string, args ...interface{})
	Panicln(args ...interface{})

	// Leveled methods, from logrus
	Debug(args ...interface{})
	Debugf(format string, args ...interface{})
	Debugln(args ...interface{})

	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Errorln(args ...interface{})

	Info(args ...interface{})
	Infof(format string, args ...interface{})
	Infoln(args ...interface{})

	Warn(args ...interface{})
	Warnf(format string, args ...interface{})
	Warnln(args ...interface{})
}

// WithLogger creates a new context with the provided logger.
func WithLogger(ctx Context, logger Logger) Context {
	return WithValue(ctx, "logger", logger)
}

// GetLoggerWithField returns a logger instance with the specified field key
// and value without affecting the context. Extra specified keys will be
// resolved from the context.
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
}

// GetLoggerWithFields returns a logger instance with the specified fields
// without affecting the context. Extra specified keys will be resolved from
// the context.
func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
	// must convert from interface{} -> interface{} to string -> interface{} for logrus.
	lfields := make(logrus.Fields, len(fields))
	for key, value := range fields {
		lfields[fmt.Sprint(key)] = value
	}

	return getLogrusLogger(ctx, keys...).WithFields(lfields)
}

// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// it's recommended that a String method is implemented.
func GetLogger(ctx Context, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...)
}

// getLogrusLogger returns the logrus logger for the context. If one or more
// keys are provided, they will be resolved on the context and included in
// the logger. Only use this function if specific logrus functionality is
// required.
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
	var logger *logrus.Entry

	// Get a logger, if it is present.
	loggerInterface := ctx.Value("logger")
	if loggerInterface != nil {
		if lgr, ok := loggerInterface.(*logrus.Entry); ok {
			logger = lgr
		}
	}

	if logger == nil {
		fields := logrus.Fields{}

		// Fill in the instance id, if we have it.
		instanceID := ctx.Value("instance.id")
		if instanceID != nil {
			fields["instance.id"] = instanceID
		}

		fields["go.version"] = runtime.Version()
		// If no logger is found, just return the standard logger.
		logger = logrus.StandardLogger().WithFields(fields)
	}

	fields := logrus.Fields{}
	for _, key := range keys {
		v := ctx.Value(key)
		if v != nil {
			fields[fmt.Sprint(key)] = v
		}
	}

	return logger.WithFields(fields)
}
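
A brief sketch of how these helpers compose (the "app.name" key and field
values are illustrative assumptions, not keys this package defines):

func exampleLogging() {
	ctx := WithValue(Background(), "app.name", "registry")
	// The stored logger resolves and carries app.name from the context.
	ctx = WithLogger(ctx, GetLogger(ctx, "app.name"))

	// One-off fields can be layered on without touching the context.
	GetLoggerWithField(ctx, "operation", "blob.upload").Info("upload started")
}
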
104
vendor/github.com/docker/distribution/context/trace.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
package context

import (
	"runtime"
	"time"

	"github.com/docker/distribution/uuid"
)

// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time, and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
//	func timedOperation(ctx Context) {
//		ctx, done := WithTrace(ctx)
//		defer done("this will be the log message")
//		// ... function body ...
//	}
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
//	INFO[0001] this will be the log message  trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package, and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
	if ctx == nil {
		ctx = Background()
	}

	pc, file, line, _ := runtime.Caller(1)
	f := runtime.FuncForPC(pc)
	ctx = &traced{
		Context: ctx,
		id:      uuid.Generate().String(),
		start:   time.Now(),
		parent:  GetStringValue(ctx, "trace.id"),
		fnname:  f.Name(),
		file:    file,
		line:    line,
	}

	return ctx, func(format string, a ...interface{}) {
		GetLogger(ctx,
			"trace.duration",
			"trace.id",
			"trace.parent.id",
			"trace.func",
			"trace.file",
			"trace.line").
			Debugf(format, a...)
	}
}

// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
	Context
	id     string
	parent string
	start  time.Time
	fnname string
	file   string
	line   int
}

func (ts *traced) Value(key interface{}) interface{} {
	switch key {
	case "trace.start":
		return ts.start
	case "trace.duration":
		return time.Since(ts.start)
	case "trace.id":
		return ts.id
	case "trace.parent.id":
		if ts.parent == "" {
			return nil // must return nil to signal no parent.
		}

		return ts.parent
	case "trace.func":
		return ts.fnname
	case "trace.file":
		return ts.file
	case "trace.line":
		return ts.line
	}

	return ts.Context.Value(key)
}
85
vendor/github.com/docker/distribution/context/trace_test.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
package context

import (
	"runtime"
	"testing"
	"time"
)

// TestWithTrace ensures that tracing has the expected values in the context.
func TestWithTrace(t *testing.T) {
	pc, file, _, _ := runtime.Caller(0) // get current caller.
	f := runtime.FuncForPC(pc)

	base := []valueTestCase{
		{
			key:           "trace.id",
			notnilorempty: true,
		},
		{
			key:           "trace.file",
			expected:      file,
			notnilorempty: true,
		},
		{
			key:           "trace.line",
			notnilorempty: true,
		},
		{
			key:           "trace.start",
			notnilorempty: true,
		},
	}

	ctx, done := WithTrace(Background())
	defer done("this will be emitted at end of test")

	checkContextForValues(t, ctx, append(base, valueTestCase{
		key:      "trace.func",
		expected: f.Name(),
	}))

	traced := func() {
		parentID := ctx.Value("trace.id") // ensure the parent trace id is correct.

		pc, _, _, _ := runtime.Caller(0) // get current caller.
		f := runtime.FuncForPC(pc)
		ctx, done := WithTrace(ctx)
		defer done("this should be subordinate to the other trace")
		time.Sleep(time.Second)
		checkContextForValues(t, ctx, append(base, valueTestCase{
			key:      "trace.func",
			expected: f.Name(),
		}, valueTestCase{
			key:      "trace.parent.id",
			expected: parentID,
		}))
	}
	traced()

	time.Sleep(time.Second)
}

type valueTestCase struct {
	key           string
	expected      interface{}
	notnilorempty bool // just check not empty/not nil
}

func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) {
	for _, testcase := range values {
		v := ctx.Value(testcase.key)
		if testcase.notnilorempty {
			if v == nil || v == "" {
				t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v)
			}
			continue
		}

		if v != testcase.expected {
			t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected)
		}
	}
}
24
vendor/github.com/docker/distribution/context/util.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
package context

import (
	"time"
)

// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx Context, key interface{}) time.Duration {
	if startedAt, ok := ctx.Value(key).(time.Time); ok {
		return time.Since(startedAt)
	}
	return 0
}

// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx Context, key interface{}) (value string) {
	if valuev, ok := ctx.Value(key).(string); ok {
		value = valuev
	}
	return value
}
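
For instance, WithRequest stores time.Now() under "http.request.startedat", so
the duration that GetResponseLogger reports reduces to a single lookup. A
hedged sketch (the function is illustrative, assuming a context that has
passed through WithRequest):

func exampleSince(ctx Context) {
	if elapsed := Since(ctx, "http.request.startedat"); elapsed > 0 {
		GetLogger(ctx).Infof("request running for %v", elapsed)
	}
}
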
16
vendor/github.com/docker/distribution/context/version.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
package context

// WithVersion stores the application version in the context. The new context
// gets a logger to ensure log messages are marked with the application
// version.
func WithVersion(ctx Context, version string) Context {
	ctx = WithValue(ctx, "version", version)
	// push a new logger onto the stack
	return WithLogger(ctx, GetLogger(ctx, "version"))
}

// GetVersion returns the application version from the context. An empty
// string may be returned if the version was not set on the context.
func GetVersion(ctx Context) string {
	return GetStringValue(ctx, "version")
}
19
vendor/github.com/docker/distribution/context/version_test.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
package context

import "testing"

func TestVersionContext(t *testing.T) {
	ctx := Background()

	if GetVersion(ctx) != "" {
		t.Fatalf("context should not yet have a version")
	}

	expected := "2.1-whatever"
	ctx = WithVersion(ctx, expected)
	version := GetVersion(ctx)

	if version != expected {
		t.Fatalf("version was not set: %q != %q", version, expected)
	}
}
36
vendor/github.com/docker/distribution/contrib/apache/README.MD
generated
vendored
Normal file
@@ -0,0 +1,36 @@
# Apache HTTPd sample for Registry v1, v2 and mirror

Three containers are involved:

* Docker Registry v1 (registry 0.9.1)
* Docker Registry v2 (registry 2.0.0)
* Docker Registry v1 in mirror mode

HTTP for the mirror and HTTPS for v1 & v2:

* http://registry.example.com proxies Docker Registry 1.0 in Mirror mode
* https://registry.example.com proxies Docker Registry 1.0 or 2.0 in Hosting mode

## 3 Docker containers should be started

* Docker Registry 1.0 in Mirror mode : port 5001
* Docker Registry 1.0 in Hosting mode : port 5000
* Docker Registry 2.0 in Hosting mode : port 5002

### Registry v1

	docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1

### Mirror

	docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \
	-e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1

### Registry v2

	docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2

# For Hosting mode access

* users need an account (valid-user) to be able to fetch images
* only users with the docker-deployer account are allowed to push images
127
vendor/github.com/docker/distribution/contrib/apache/apache.conf
generated
vendored
Normal file
@@ -0,0 +1,127 @@
#
# Sample Apache 2.x configuration where:
#

<VirtualHost *:80>

  ServerName registry.example.com
  ServerAlias www.registry.example.com

  ProxyRequests     off
  ProxyPreserveHost on

  # no proxy for /error/ (Apache HTTPd error messages)
  ProxyPass /error/ !

  ProxyPass        /_ping http://localhost:5001/_ping
  ProxyPassReverse /_ping http://localhost:5001/_ping

  ProxyPass        /v1 http://localhost:5001/v1
  ProxyPassReverse /v1 http://localhost:5001/v1

  # Logs
  ErrorLog ${APACHE_LOG_DIR}/mirror_error_log
  CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog

</VirtualHost>


<VirtualHost *:443>

  ServerName registry.example.com
  ServerAlias www.registry.example.com

  SSLEngine on
  SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt
  SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key

  # Higher Strength SSL Ciphers
  SSLProtocol all -SSLv2 -SSLv3 -TLSv1
  SSLCipherSuite RC4-SHA:HIGH
  SSLHonorCipherOrder on

  # Logs
  ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log
  CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog

  Header always set "Docker-Distribution-Api-Version" "registry/2.0"
  Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0"
  RequestHeader set X-Forwarded-Proto "https"

  ProxyRequests     off
  ProxyPreserveHost on

  # no proxy for /error/ (Apache HTTPd error messages)
  ProxyPass /error/ !

  #
  # Registry v1
  #

  ProxyPass        /v1 http://localhost:5000/v1
  ProxyPassReverse /v1 http://localhost:5000/v1

  ProxyPass        /_ping http://localhost:5000/_ping
  ProxyPassReverse /_ping http://localhost:5000/_ping

  # Authentication required for push
  <Location /v1>
    Order deny,allow
    Allow from all
    AuthName "Registry Authentication"
    AuthType basic
    AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd"

    # Read access for authenticated users
    <Limit GET HEAD>
      Require valid-user
    </Limit>

    # Write access for the docker-deployer account only
    <Limit POST PUT DELETE>
      Require user docker-deployer
    </Limit>

  </Location>

  # Allow ping to run unauthenticated.
  <Location /v1/_ping>
    Satisfy any
    Allow from all
  </Location>

  # Allow ping to run unauthenticated.
  <Location /_ping>
    Satisfy any
    Allow from all
  </Location>

  #
  # Registry v2
  #

  ProxyPass        /v2 http://localhost:5002/v2
  ProxyPassReverse /v2 http://localhost:5002/v2

  <Location /v2>
    Order deny,allow
    Allow from all
    AuthName "Registry Authentication"
    AuthType basic
    AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd"

    # Read access for authenticated users
    <Limit GET HEAD>
      Require valid-user
    </Limit>

    # Write access for docker-deployer only
    <Limit POST PUT DELETE>
      Require user docker-deployer
    </Limit>

  </Location>


</VirtualHost>
147
vendor/github.com/docker/distribution/contrib/compose/README.md
generated
vendored
Normal file
@@ -0,0 +1,147 @@
# Docker Compose V1 + V2 registry

This compose configuration configures a `v1` and `v2` registry behind an `nginx`
proxy. By default, you can access the combined registry at `localhost:5000`.

The configuration does not support pushing images to `v2` and pulling from `v1`.
If a `docker` client has a version less than 1.6, Nginx will route its requests
to the 1.0 registry. Requests from newer clients will route to the 2.0 registry.

### Install Docker Compose

1. Open a new terminal on the host with your `distribution` source.

2. Get the `docker-compose` binary.

        $ sudo wget https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` -O /usr/local/bin/docker-compose

    This command installs the binary in the `/usr/local/bin` directory.

3. Add executable permissions to the binary.

        $ sudo chmod +x /usr/local/bin/docker-compose

## Build and run with Compose

1. In your terminal, navigate to the `distribution/contrib/compose` directory.

    This directory includes a single `docker-compose.yml` configuration.

        nginx:
          build: "nginx"
          ports:
            - "5000:5000"
          links:
            - registryv1:registryv1
            - registryv2:registryv2
        registryv1:
          image: registry
          ports:
            - "5000"
        registryv2:
          build: "../../"
          ports:
            - "5000"

    This configuration builds a new `nginx` image as specified by the
    `nginx/Dockerfile` file. The 1.0 registry comes from Docker's official
    public image. Finally, the registry 2.0 image is built from the
    `distribution/Dockerfile` you've used previously.

2. Get a registry 1.0 image.

        $ docker pull registry:0.9.1

    The Compose configuration looks for this image locally. If you don't do this
    step, later steps can fail.

3. Build `nginx` and the registry 2.0 image.

        $ docker-compose build
        registryv1 uses an image, skipping
        Building registryv2...
        Step 0 : FROM golang:1.4

        ...

        Removing intermediate container 9f5f5068c3f3
        Step 4 : COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf
         ---> 74acc70fa106
        Removing intermediate container edb84c2b40cb
        Successfully built 74acc70fa106

    The command outputs its progress until it completes.

4. Start your configuration with compose.

        $ docker-compose up
        Recreating compose_registryv1_1...
        Recreating compose_registryv2_1...
        Recreating compose_nginx_1...
        Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1
        ...

5. In another terminal, display the running configuration.

        $ docker ps
        CONTAINER ID   IMAGE                       COMMAND                CREATED         STATUS         PORTS                                     NAMES
        a81ad2557702   compose_nginx:latest        "nginx -g 'daemon of   8 minutes ago   Up 8 minutes   80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp   compose_nginx_1
        0618437450dd   compose_registryv2:latest   "registry cmd/regist   8 minutes ago   Up 8 minutes   0.0.0.0:32777->5000/tcp                   compose_registryv2_1
        aa82b1ed8e61   registry:latest             "docker-registry"      8 minutes ago   Up 8 minutes   0.0.0.0:32776->5000/tcp                   compose_registryv1_1

### Explore a bit

1. Check for TLS on your `nginx` server.

        $ curl -v https://localhost:5000
        * Rebuilt URL to: https://localhost:5000/
        * Hostname was NOT found in DNS cache
        *   Trying 127.0.0.1...
        * Connected to localhost (127.0.0.1) port 5000 (#0)
        * successfully set certificate verify locations:
        *   CAfile: none
          CApath: /etc/ssl/certs
        * SSLv3, TLS handshake, Client hello (1):
        * SSLv3, TLS handshake, Server hello (2):
        * SSLv3, TLS handshake, CERT (11):
        * SSLv3, TLS alert, Server hello (2):
        * SSL certificate problem: self signed certificate
        * Closing connection 0
        curl: (60) SSL certificate problem: self signed certificate
        More details here: http://curl.haxx.se/docs/sslcerts.html

2. Tag the `v1` registry image.

        $ docker tag registry:latest localhost:5000/registry_one:latest

3. Push it to the localhost.

        $ docker push localhost:5000/registry_one:latest

    If you are using the 1.6 Docker client, this pushes the image to the `v2` registry.

4. Use `curl` to list the image in the registry.

        $ curl -v -X GET http://localhost:5000/v2/registry_one/tags/list
        * Hostname was NOT found in DNS cache
        *   Trying 127.0.0.1...
        * Connected to localhost (127.0.0.1) port 32777 (#0)
        > GET /v2/registry1/tags/list HTTP/1.1
        > User-Agent: curl/7.36.0
        > Host: localhost:5000
        > Accept: */*
        >
        < HTTP/1.1 200 OK
        < Content-Type: application/json; charset=utf-8
        < Docker-Distribution-Api-Version: registry/2.0
        < Date: Tue, 14 Apr 2015 22:34:13 GMT
        < Content-Length: 39
        <
        {"name":"registry_one","tags":["latest"]}
        * Connection #0 to host localhost left intact

    This example refers to the specific port assigned to the 2.0 registry. You saw
    this port earlier, when you used `docker ps` to show your running containers.
15
vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml
generated
vendored
Normal file
@@ -0,0 +1,15 @@
nginx:
  build: "nginx"
  ports:
    - "5000:5000"
  links:
    - registryv1:registryv1
    - registryv2:registryv2
registryv1:
  image: registry
  ports:
    - "5000"
registryv2:
  build: "../../"
  ports:
    - "5000"
6
vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,6 @@
FROM nginx:1.7

COPY nginx.conf /etc/nginx/nginx.conf
COPY registry.conf /etc/nginx/conf.d/registry.conf
COPY docker-registry.conf /etc/nginx/docker-registry.conf
COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf
6
vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf
generated
vendored
Normal file
@@ -0,0 +1,6 @@
proxy_pass                          http://docker-registry-v2;
proxy_set_header  Host              $http_host;   # required for docker client's sake
proxy_set_header  X-Real-IP         $remote_addr; # pass on real client's IP
proxy_set_header  X-Forwarded-For   $proxy_add_x_forwarded_for;
proxy_set_header  X-Forwarded-Proto $scheme;
proxy_read_timeout                  900;
7
vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf
generated
vendored
Normal file
@@ -0,0 +1,7 @@
proxy_pass                          http://docker-registry;
proxy_set_header  Host              $http_host;   # required for docker client's sake
proxy_set_header  X-Real-IP         $remote_addr; # pass on real client's IP
proxy_set_header  X-Forwarded-For   $proxy_add_x_forwarded_for;
proxy_set_header  X-Forwarded-Proto $scheme;
proxy_set_header  Authorization     "";           # For basic auth through nginx in v1 to work, please comment this line
proxy_read_timeout                  900;
27
vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf
generated
vendored
Normal file
@@ -0,0 +1,27 @@
user  nginx;
worker_processes  1;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;

events {
    worker_connections  1024;
}

http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;

    keepalive_timeout  65;

    include /etc/nginx/conf.d/*.conf;
}
41
vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf
generated
vendored
Normal file
@@ -0,0 +1,41 @@
# Docker registry proxy for api versions 1 and 2

upstream docker-registry {
  server registryv1:5000;
}

upstream docker-registry-v2 {
  server registryv2:5000;
}

# No client auth or TLS
server {
  listen 5000;
  server_name localhost;

  # disable any limits to avoid HTTP 413 for large image uploads
  client_max_body_size 0;

  # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
  chunked_transfer_encoding on;

  location /v2/ {
    # Do not allow connections from docker 1.5 and earlier
    # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
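    # For example, "docker/1.4.1 go/go1.3.3" and a bare "Go *" agent from a
    # pre-1.6 ping both match and receive a 404, while "docker/1.6.0" and dev
    # builds such as "docker/1.5.0-dev" (excluded by the lookahead) fall through.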
    if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
      return 404;
    }

    # To add basic authentication to v2 use auth_basic setting plus add_header
    # auth_basic "registry.localhost";
    # auth_basic_user_file test.password;
    # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always;

    include docker-registry-v2.conf;
  }

  location / {
    include docker-registry.conf;
  }
}
9
vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,9 @@
FROM distribution/golem:0.1

MAINTAINER Docker Distribution Team <distribution@docker.com>

RUN apk add --no-cache git

ENV TMPDIR /var/lib/docker/tmp

WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration
63
vendor/github.com/docker/distribution/contrib/docker-integration/README.md
generated
vendored
Normal file
@@ -0,0 +1,63 @@
# Docker Registry Integration Testing

These integration tests cover interactions between registry clients such as
the docker daemon and the registry server. All tests can be run using the
[golem integration test runner](https://github.com/docker/golem).

The integration tests configure components using docker compose
(see docker-compose.yaml) and the runner can be configured using the golem
configuration file (see golem.conf).

## Running integration tests

### Run using the multiversion script

The integration tests in the `contrib/docker-integration` directory can be run
simply by executing the run script `./run_multiversion.sh`. If there is no
running daemon to connect to, run it as `./run_multiversion.sh -d`.

This command will build the distribution image from the locally checked out
version and run against multiple versions of docker defined in the script. To
run a specific version of the registry or docker, Golem will need to be
executed manually.

### Run manually using Golem

Using the golem tool directly allows running against multiple versions of
the registry and docker. Running against multiple versions of the registry
can be useful for testing changes in the docker daemon which are not
covered by the default run script.

#### Installing Golem

Golem is distributed as an executable binary which can be installed from
the [release page](https://github.com/docker/golem/releases/tag/v0.1).

#### Running golem with docker

Additionally golem can be run as a docker image, requiring no additional
installation.

`docker run --privileged -v "$GOPATH/src/github.com/docker/distribution/contrib/docker-integration:/test" -w /test distribution/golem golem -rundaemon .`

#### Golem custom images

Golem tests versions of software by defining the docker image to test.

Run with registry 2.2.1 and docker 1.10.3:

`golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .`


#### Use golem caching for developing tests

Golem allows caching image configuration to reduce test start-up time.
Using this cache will allow tests with the same set of images to start
up quickly. This can be useful when developing tests and needing the
test to run quickly. If there are changes which affect the image (such as
building a new registry image), then start-up time will be slower.

Run this command multiple times; after the first run, tests
should start much quicker.

`golem -cache ~/.cache/docker/golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .`
91
vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml
generated
vendored
Normal file
@@ -0,0 +1,91 @@
nginx:
  build: "nginx"
  ports:
    - "5000:5000"
    - "5002:5002"
    - "5440:5440"
    - "5441:5441"
    - "5442:5442"
    - "5443:5443"
    - "5444:5444"
    - "5445:5445"
    - "5446:5446"
    - "5447:5447"
    - "5448:5448"
    - "5554:5554"
    - "5555:5555"
    - "5556:5556"
    - "5557:5557"
    - "5558:5558"
    - "5559:5559"
    - "5600:5600"
    - "6666:6666"
  links:
    - registryv2:registryv2
    - malevolent:malevolent
    - registryv2token:registryv2token
    - tokenserver:tokenserver
    - registryv2tokenoauth:registryv2tokenoauth
    - registryv2tokenoauthnotls:registryv2tokenoauthnotls
    - tokenserveroauth:tokenserveroauth
registryv2:
  image: golem-distribution:latest
  ports:
    - "5000"
registryv2token:
  image: golem-distribution:latest
  ports:
    - "5000"
  volumes:
    - ./tokenserver/registry-config.yml:/etc/docker/registry/config.yml
    - ./tokenserver/certs/localregistry.cert:/etc/docker/registry/localregistry.cert
    - ./tokenserver/certs/localregistry.key:/etc/docker/registry/localregistry.key
    - ./tokenserver/certs/signing.cert:/etc/docker/registry/tokenbundle.pem
tokenserver:
  build: "tokenserver"
  command: "--debug -addr 0.0.0.0:5556 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5556"
  ports:
    - "5556"
registryv2tokenoauth:
  image: golem-distribution:latest
  ports:
    - "5000"
  volumes:
    - ./tokenserver-oauth/registry-config.yml:/etc/docker/registry/config.yml
    - ./tokenserver-oauth/certs/localregistry.cert:/etc/docker/registry/localregistry.cert
    - ./tokenserver-oauth/certs/localregistry.key:/etc/docker/registry/localregistry.key
    - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem
registryv2tokenoauthnotls:
  image: golem-distribution:latest
  ports:
    - "5000"
  volumes:
    - ./tokenserver-oauth/registry-config-notls.yml:/etc/docker/registry/config.yml
    - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem
tokenserveroauth:
  build: "tokenserver-oauth"
  command: "--debug -addr 0.0.0.0:5559 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5559 -enforce-class"
  ports:
    - "5559"
malevolent:
  image: "dmcgowan/malevolent:0.1.0"
  command: "-l 0.0.0.0:6666 -r http://registryv2:5000 -c /certs/localregistry.cert -k /certs/localregistry.key"
  links:
    - registryv2:registryv2
  volumes:
    - ./malevolent-certs:/certs:ro
  ports:
    - "6666"
docker:
  image: golem-dind:latest
  container_name: dockerdaemon
  command: "docker daemon --debug -s $DOCKER_GRAPHDRIVER"
  privileged: true
  environment:
    DOCKER_GRAPHDRIVER:
  volumes:
    - /etc/generated_certs.d:/etc/docker/certs.d
    - /var/lib/docker
  links:
    - nginx:localregistry
    - nginx:auth.localregistry
18
vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf
generated
vendored
Normal file
@@ -0,0 +1,18 @@
[[suite]]
  dind=true
  images=[ "nginx:1.9", "dmcgowan/token-server:simple", "dmcgowan/token-server:oauth", "dmcgowan/malevolent:0.1.0", "dmcgowan/ncat:latest" ]

[[suite.pretest]]
  command="sh ./install_certs.sh /etc/generated_certs.d"
[[suite.testrunner]]
  command="bats -t ."
  format="tap"
  env=["TEST_REPO=hello-world", "TEST_TAG=latest", "TEST_USER=testuser", "TEST_PASSWORD=passpassword", "TEST_REGISTRY=localregistry", "TEST_SKIP_PULL=true"]
[[suite.customimage]]
  tag="golem-distribution:latest"
  default="registry:2.2.1"
[[suite.customimage]]
  tag="golem-dind:latest"
  default="docker:1.10.1-dind"
  version="1.10.1"
127
vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash
generated
vendored
Normal file
@@ -0,0 +1,127 @@
# has_digest enforces the last output line is "Digest: sha256:..."
# the input is the output from a docker push cli command
function has_digest() {
	filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p')
	[ "$filtered" != "" ]
	# See http://wiki.alpinelinux.org/wiki/Regex#BREs before making changes to regex
	digest=$(expr "$filtered" : ".*\(sha[0-9]\{3,3\}:[a-z0-9]*\)")
}

# tempImage creates a new image using the provided name
# requires bats
function tempImage() {
	dir=$(mktemp -d)
	run dd if=/dev/urandom of="$dir/f" bs=1024 count=512
	cat <<DockerFileContent > "$dir/Dockerfile"
FROM scratch
COPY f /f

CMD []
DockerFileContent

	cp_t $dir "/tmpbuild/"
	exec_t "cd /tmpbuild/; docker build --no-cache -t $1 .; rm -rf /tmpbuild/"
}

# skip basic auth tests with Docker 1.6, where they don't pass due to
# certificate issues, requires bats
function basic_auth_version_check() {
	run sh -c 'docker version | fgrep -q "Client version: 1.6."'
	if [ "$status" -eq 0 ]; then
		skip "Basic auth tests don't support 1.6.x"
	fi
}

email="a@nowhere.com"

# docker_t_login calls login with email depending on version
function docker_t_login() {
	# Only pass email field pre 1.11, no deprecation warning
	parse_version "$GOLEM_DIND_VERSION"
	v=$version
	parse_version "1.11.0"
	if [ "$v" -lt "$version" ]; then
		run docker_t login -e $email $@
	else
		run docker_t login $@
	fi
}

# login issues a login to docker to the provided server
# uses user, password, and email variables set outside of function
# requires bats
function login() {
	rm -f /root/.docker/config.json

	docker_t_login -u $user -p $password $1
	if [ "$status" -ne 0 ]; then
		echo $output
	fi
	[ "$status" -eq 0 ]

	# Handle different deprecation warnings
	parse_version "$GOLEM_DIND_VERSION"
	v=$version
	parse_version "1.11.0"
	if [ "$v" -lt "$version" ]; then
		# First line is WARNING about credential save or email deprecation (maybe both)
		[ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ]
	else
		[ "${lines[0]}" = "Login Succeeded" ]
	fi
}

function login_oauth() {
	login $@

	tmpFile=$(mktemp)
	get_file_t /root/.docker/config.json $tmpFile
	run awk -v RS="" "/\"$1\": \\{[[:space:]]+\"auth\": \"[[:alnum:]]+\",[[:space:]]+\"identitytoken\"/ {exit 3}" $tmpFile
	[ "$status" -eq 3 ]
}

function parse_version() {
	version=$(echo "$1" | cut -d '-' -f1) # Strip anything after '-'
	major=$(echo "$version" | cut -d . -f1)
	minor=$(echo "$version" | cut -d . -f2)
	rev=$(echo "$version" | cut -d . -f3)

	version=$((major * 1000 * 1000 + minor * 1000 + rev))
}
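
# For example, parse_version "1.10.3" yields 1*1000000 + 10*1000 + 3 = 1010003,
# and parse_version "1.11.0-rc1" strips "-rc1" first; comparing the resulting
# numbers with -lt therefore orders releases correctly.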

function version_check() {
	name=$1
	checkv=$2
	minv=$3
	parse_version "$checkv"
	v=$version
	parse_version "$minv"
	if [ "$v" -lt "$version" ]; then
		skip "$name version \"$checkv\" does not meet required version \"$minv\""
	fi
}

function get_file_t() {
	docker cp dockerdaemon:$1 $2
}

function cp_t() {
	docker cp $1 dockerdaemon:$2
}

function exec_t() {
	docker exec dockerdaemon sh -c "$@"
}

function docker_t() {
	docker exec dockerdaemon docker $@
}

# build creates a new docker image id from another image
function build() {
	docker exec -i dockerdaemon docker build --no-cache -t $1 - <<DOCKERFILE
FROM $2
MAINTAINER distribution@docker.com
DOCKERFILE
}
50
vendor/github.com/docker/distribution/contrib/docker-integration/install_certs.sh
generated
vendored
Normal file
@@ -0,0 +1,50 @@
#!/bin/sh
set -e

hostname="localregistry"
installdir="$1"

install_ca() {
	mkdir -p $1/$hostname:$2
	cp ./nginx/ssl/registry-ca+ca.pem $1/$hostname:$2/ca.crt
	if [ "$3" != "" ]; then
		cp ./nginx/ssl/registry-$3+client-cert.pem $1/$hostname:$2/client.cert
		cp ./nginx/ssl/registry-$3+client-key.pem $1/$hostname:$2/client.key
	fi
}

install_test_certs() {
	install_ca $1 5440
	install_ca $1 5441
	install_ca $1 5442 ca
	install_ca $1 5443 noca
	install_ca $1 5444 ca
	install_ca $1 5447 ca
	# For test remove CA
	rm $1/${hostname}:5447/ca.crt
	install_ca $1 5448
	install_ca $1 5600
}

install_ca_file() {
	mkdir -p $2
	cp $1 $2/ca.crt
}

append_ca_file() {
	mkdir -p $2
	cat $1 >> $2/ca.crt
}

install_test_certs $installdir

# Malevolent server
install_ca_file ./malevolent-certs/ca.pem $installdir/$hostname:6666

# Token server
install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5554
install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5555
install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5557
install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5558
append_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5600
19
vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.cert
generated
vendored
Normal file
@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDETCCAfugAwIBAgIQZRKt7OeG+TlC2riszYwQQTALBgkqhkiG9w0BAQswJjER
MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDgyMDIz
MjE0OVoXDTE4MDgwNDIzMjE0OVowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV
BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDPdsUBStNMz4coXfQVIJIafG85VkngM4fV7hrg7AbiGLCWvq8cWOrYM50G9Wmo
twK1WeQ6bigYOjINgSfTxcy3adciVZIIJyXqboz6n2V0yRPWpakof939bvuAurAP
tSqQ2V5fGN0ZZn4J4IbXMSovKwo7sG3X6i4q/8DYHZ/mKjvCRMPC3MGWqunknpkm
dzyKbIFHaDKlAqIOwTsDhHvGzm/9n3D+h4sl5ZPBobuBEV2u5GR0H5ujak4+Kczt
thCWtRkzCfnjW0TEanheSYJGu8OgCGoFjQnHotgqvOO6iHZCsrB3gf8WQeou+y9e
+OyLZv3FmqdC9SXr3b0LGQTFAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV
HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL
A4IBAQC/PP2Y9QVhO8t4BXML1QpNRWqXG8Gg0P1XIh6M6FoxcGIodLdbzui828YB
wm9ZlyKars+nDdgLdQWawdV7hSd6s2NeQlHYQSGLsdTAVkgIxiD7D2Tw3kAZ6Zrj
dPikoVAc+rBMm/BXQLzy95IAbBVOHOpBkOOgF+TYxeLnOc3GzbUqBi1Pq97DMaxr
DaDuywH55P/6v7qt610UIsZ6+RZ78iiRx4Q+oRxEqGT0rXI76gVxOFabbJuFr1n1
kEWa3u/BssJzX3KVAm7oUtaBnj2SH5fokFmvZ5lBXA4QO/5doOa8yZiFFvvQs7EY
SWDxLrvS33UCtsCcpPggjehnxKaC
-----END CERTIFICATE-----