mirror of https://github.com/openshift/installer.git synced 2026-02-05 15:47:14 +01:00

update vendor

This commit is contained in:
Rafael Fonseca
2024-06-10 18:36:33 +02:00
parent f455be90c5
commit d4e6184ecc
296 changed files with 39353 additions and 14888 deletions

View File

@@ -1,143 +0,0 @@
package reference
import "regexp"
var (
// alphaNumericRegexp defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumericRegexp = match(`[a-z0-9]+`)
// separatorRegexp defines the separators allowed to be embedded in name
// components. This allows one period, one or two underscores, and multiple
// dashes.
separatorRegexp = match(`(?:[._]|__|[-]*)`)
// nameComponentRegexp restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscores, and multiple dashes.
nameComponentRegexp = expression(
alphaNumericRegexp,
optional(repeated(separatorRegexp, alphaNumericRegexp)))
// domainComponentRegexp restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp
// and followed by an optional port.
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
DomainRegexp = expression(
domainComponentRegexp,
optional(repeated(literal(`.`), domainComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`)))
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = match(`[\w][\w.-]{0,127}`)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = anchored(TagRegexp)
// DigestRegexp matches valid digests.
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = anchored(DigestRegexp)
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = expression(
optional(DomainRegexp, literal(`/`)),
nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = anchored(
optional(capture(DomainRegexp), literal(`/`)),
capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = anchored(capture(NameRegexp),
optional(literal(":"), capture(TagRegexp)),
optional(literal("@"), capture(DigestRegexp)))
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = match(`([a-f0-9]{64})`)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
// anchoredShortIdentifierRegexp is used to check if a value
// is a possible identifier prefix, anchored at start and end
// of string.
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)
// match compiles the string to a regular expression.
var match = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
re := match(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
var s string
for _, re := range res {
s += re.String()
}
return match(s)
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `?`)
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `+`)
}
// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(?:` + expression(res...).String() + `)`)
}
// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(` + expression(res...).String() + `)`)
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
return match(`^` + expression(res...).String() + `$`)
}
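
For illustration (not part of the vendored file): the anchored ReferenceRegexp splits a full reference into its name, tag, and digest capture groups. A minimal sketch against the pre-v5 import path used by this vendored copy:

package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/docker/reference" // pre-v5 path used by this vendored copy
)

func main() {
	ref := "example.com:5000/foo/bar:v1@sha256:" + strings.Repeat("a", 64)
	// Submatches: [0] full match, [1] name, [2] tag, [3] digest.
	if m := reference.ReferenceRegexp.FindStringSubmatch(ref); m != nil {
		fmt.Println(m[1]) // example.com:5000/foo/bar
		fmt.Println(m[2]) // v1
		fmt.Println(m[3]) // sha256: followed by 64 'a's
	}
}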

View File

@@ -1,483 +0,0 @@
package sysregistriesv2
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"github.com/BurntSushi/toml"
"github.com/containers/image/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/containers/image/docker/reference"
)
// systemRegistriesConfPath is the path to the system-wide registry
// configuration file and is used to add/subtract potential registries for
// obtaining images. You can override this at build time with
// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path'
var systemRegistriesConfPath = builtinRegistriesConfPath
// builtinRegistriesConfPath is the path to the registry configuration file.
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
// Endpoint describes a remote location of a registry.
type Endpoint struct {
// The endpoint's remote location.
Location string `toml:"location,omitempty"`
// If true, certs verification will be skipped and HTTP (non-TLS)
// connections will be allowed.
Insecure bool `toml:"insecure,omitempty"`
}
// rewriteReference substitutes the provided reference `prefix` with the
// endpoint's `location` in `ref` and creates a new named reference from it.
// The function errors if the newly created reference is not parsable.
func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
refString := ref.String()
if !refMatchesPrefix(refString, prefix) {
return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
}
newNamedRef := strings.Replace(refString, prefix, e.Location, 1)
newParsedRef, err := reference.ParseNamed(newNamedRef)
if err != nil {
return nil, errors.Wrapf(err, "error rewriting reference")
}
logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String())
return newParsedRef, nil
}
// Registry represents a registry.
type Registry struct {
// Prefix is used for matching images, and to translate one namespace to
// another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
// and we pull from "example.com/bar/myimage:latest", the image will
// effectively be pulled from "example.com/foo/bar/myimage:latest".
// If no Prefix is specified, it defaults to the specified location.
Prefix string `toml:"prefix"`
// A registry is an Endpoint too
Endpoint
// The registry's mirrors.
Mirrors []Endpoint `toml:"mirror,omitempty"`
// If true, pulling from the registry will be blocked.
Blocked bool `toml:"blocked,omitempty"`
// If true, mirrors will only be used for digest pulls. Pulling images by
// tag can potentially yield different images, depending on which endpoint
// we pull from. Forcing digest-pulls for mirrors avoids that issue.
MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
}
// PullSource consists of an Endpoint and a Reference. Note that the reference is
// rewritten according to the registry's prefix and the Endpoint's location.
type PullSource struct {
Endpoint Endpoint
Reference reference.Named
}
// PullSourcesFromReference returns a slice of PullSources based on the passed
// reference.
func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
var endpoints []Endpoint
if r.MirrorByDigestOnly {
// Only use mirrors when the reference is a digest one.
if _, isDigested := ref.(reference.Canonical); isDigested {
endpoints = append(r.Mirrors, r.Endpoint)
} else {
endpoints = []Endpoint{r.Endpoint}
}
} else {
endpoints = append(r.Mirrors, r.Endpoint)
}
sources := []PullSource{}
for _, ep := range endpoints {
rewritten, err := ep.rewriteReference(ref, r.Prefix)
if err != nil {
return nil, err
}
sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten})
}
return sources, nil
}
// V1TOMLregistries is for backwards compatibility to sysregistries v1
type V1TOMLregistries struct {
Registries []string `toml:"registries"`
}
// V1TOMLConfig is for backwards compatibility to sysregistries v1
type V1TOMLConfig struct {
Search V1TOMLregistries `toml:"search"`
Insecure V1TOMLregistries `toml:"insecure"`
Block V1TOMLregistries `toml:"block"`
}
// V1RegistriesConf is the sysregistries v1 configuration format.
type V1RegistriesConf struct {
V1TOMLConfig `toml:"registries"`
}
// Nonempty returns true if config contains at least one configuration entry.
func (config *V1RegistriesConf) Nonempty() bool {
return (len(config.V1TOMLConfig.Search.Registries) != 0 ||
len(config.V1TOMLConfig.Insecure.Registries) != 0 ||
len(config.V1TOMLConfig.Block.Registries) != 0)
}
// V2RegistriesConf is the sysregistries v2 configuration format.
type V2RegistriesConf struct {
Registries []Registry `toml:"registry"`
// An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
}
// Nonempty returns true if config contains at least one configuration entry.
func (config *V2RegistriesConf) Nonempty() bool {
return (len(config.Registries) != 0 ||
len(config.UnqualifiedSearchRegistries) != 0)
}
// tomlConfig is the data type used to unmarshal the toml config.
type tomlConfig struct {
V2RegistriesConf
V1RegistriesConf // for backwards compatibility with sysregistries v1
}
// InvalidRegistries represents an invalid registry configuration. An example
// is when "registry.com" is defined multiple times in the configuration but
// with conflicting security settings.
type InvalidRegistries struct {
s string
}
// Error returns the error string.
func (e *InvalidRegistries) Error() string {
return e.s
}
// parseLocation parses the input string, performs some sanity checks and returns
// the sanitized input string. An error is returned if the input string is
// empty or if it contains an "http{s,}://" prefix.
func parseLocation(input string) (string, error) {
trimmed := strings.TrimRight(input, "/")
if trimmed == "" {
return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
}
if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
return "", &InvalidRegistries{s: msg}
}
return trimmed, nil
}
// ConvertToV2 returns a v2 config corresponding to a v1 one.
func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {
regMap := make(map[string]*Registry)
// The order of the registries is not really important, but make it deterministic (the same for the same config file)
// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
registryOrder := []string{}
getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object
var err error
location, err = parseLocation(location)
if err != nil {
return nil, err
}
reg, exists := regMap[location]
if !exists {
reg = &Registry{
Endpoint: Endpoint{Location: location},
Mirrors: []Endpoint{},
Prefix: location,
}
regMap[location] = reg
registryOrder = append(registryOrder, location)
}
return reg, nil
}
for _, blocked := range config.V1TOMLConfig.Block.Registries {
reg, err := getRegistry(blocked)
if err != nil {
return nil, err
}
reg.Blocked = true
}
for _, insecure := range config.V1TOMLConfig.Insecure.Registries {
reg, err := getRegistry(insecure)
if err != nil {
return nil, err
}
reg.Insecure = true
}
res := &V2RegistriesConf{
UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries,
}
for _, location := range registryOrder {
reg := regMap[location]
res.Registries = append(res.Registries, *reg)
}
return res, nil
}
// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries.
var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
// postProcess checks the consistency of all the configuration, looks for conflicts,
// and normalizes the configuration (e.g., sets the Prefix to Location if not set).
func (config *V2RegistriesConf) postProcess() error {
regMap := make(map[string][]*Registry)
for i := range config.Registries {
reg := &config.Registries[i]
// make sure Location and Prefix are valid
var err error
reg.Location, err = parseLocation(reg.Location)
if err != nil {
return err
}
if reg.Prefix == "" {
reg.Prefix = reg.Location
} else {
reg.Prefix, err = parseLocation(reg.Prefix)
if err != nil {
return err
}
}
// make sure mirrors are valid; index into the slice so the sanitized
// Location is stored back instead of being assigned to a discarded copy
for i := range reg.Mirrors {
mir := &reg.Mirrors[i]
mir.Location, err = parseLocation(mir.Location)
if err != nil {
return err
}
}
regMap[reg.Location] = append(regMap[reg.Location], reg)
}
// Given a registry can be mentioned multiple times (e.g., to have
// multiple prefixes backed by different mirrors), we need to make sure
// there are no conflicts among them.
//
// Note: we need to iterate over the registries array to ensure a
// deterministic behavior which is not guaranteed by maps.
for _, reg := range config.Registries {
others, _ := regMap[reg.Location]
for _, other := range others {
if reg.Insecure != other.Insecure {
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
return &InvalidRegistries{s: msg}
}
if reg.Blocked != other.Blocked {
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
return &InvalidRegistries{s: msg}
}
}
}
for i := range config.UnqualifiedSearchRegistries {
registry, err := parseLocation(config.UnqualifiedSearchRegistries[i])
if err != nil {
return err
}
if !anchoredDomainRegexp.MatchString(registry) {
return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)}
}
config.UnqualifiedSearchRegistries[i] = registry
}
return nil
}
// ConfigPath returns the path to the system-wide registry configuration file.
func ConfigPath(ctx *types.SystemContext) string {
confPath := systemRegistriesConfPath
if ctx != nil {
if ctx.SystemRegistriesConfPath != "" {
confPath = ctx.SystemRegistriesConfPath
} else if ctx.RootForImplicitAbsolutePaths != "" {
confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
}
}
return confPath
}
// configMutex is used to synchronize concurrent accesses to configCache.
var configMutex = sync.Mutex{}
// configCache caches already loaded configs with config paths as keys and is
// used to avoid redundantly parsing configs. Concurrent accesses to the cache
// are synchronized via configMutex.
var configCache = make(map[string]*V2RegistriesConf)
// InvalidateCache invalidates the registry cache. This function is meant to be
// used for long-running processes that need to reload potential changes made to
// the cached registry config files.
func InvalidateCache() {
configMutex.Lock()
defer configMutex.Unlock()
configCache = make(map[string]*V2RegistriesConf)
}
// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
configPath := ConfigPath(ctx)
configMutex.Lock()
// if the config has already been loaded, return the cached registries
if config, inCache := configCache[configPath]; inCache {
configMutex.Unlock()
return config, nil
}
configMutex.Unlock()
return TryUpdatingCache(ctx)
}
// TryUpdatingCache loads the configuration from the provided `SystemContext`
// without using the internal cache. On success, the loaded configuration will
// be added into the internal registry cache.
func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) {
configPath := ConfigPath(ctx)
configMutex.Lock()
defer configMutex.Unlock()
// load the config
config, err := loadRegistryConf(configPath)
if err != nil {
// Return an empty []Registry if we use the default config,
// which implies that the config path of the SystemContext
// isn't set. Note: if ctx.SystemRegistriesConfPath points to
// the default config, we will still return an error.
if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
return &V2RegistriesConf{Registries: []Registry{}}, nil
}
return nil, err
}
v2Config := &config.V2RegistriesConf
// backwards compatibility for v1 configs
if config.V1RegistriesConf.Nonempty() {
if config.V2RegistriesConf.Nonempty() {
return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
}
v2, err := config.V1RegistriesConf.ConvertToV2()
if err != nil {
return nil, err
}
v2Config = v2
}
if err := v2Config.postProcess(); err != nil {
return nil, err
}
// populate the cache
configCache[configPath] = v2Config
return v2Config, nil
}
// GetRegistries loads and returns the registries specified in the config.
// Note the parsed content of registry config files is cached. For reloading,
// use `InvalidateCache` and re-call `GetRegistries`.
func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
config, err := getConfig(ctx)
if err != nil {
return nil, err
}
return config.Registries, nil
}
// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
// for unqualified image search, in the returned order.
func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
config, err := getConfig(ctx)
if err != nil {
return nil, err
}
return config.UnqualifiedSearchRegistries, nil
}
// refMatchesPrefix returns true iff ref,
// which is a registry, repository namespace, repository or image reference (as formatted by
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
// — note that this requires the name to start with an explicit hostname!),
// matches a Registry.Prefix value.
// (This is split from the caller primarily to make testing easier.)
func refMatchesPrefix(ref, prefix string) bool {
switch {
case len(ref) < len(prefix):
return false
case len(ref) == len(prefix):
return ref == prefix
case len(ref) > len(prefix):
if !strings.HasPrefix(ref, prefix) {
return false
}
c := ref[len(prefix)]
// This allows "example.com:5000" to match "example.com",
// which is unintended; that will get fixed eventually, DON'T RELY
// ON THE CURRENT BEHAVIOR.
return c == ':' || c == '/' || c == '@'
default:
panic("Internal error: impossible comparison outcome")
}
}
// FindRegistry returns the Registry with the longest prefix for ref,
// which is a registry, repository namespace, repository or image reference (as formatted by
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
// — note that this requires the name to start with an explicit hostname!).
// If no Registry prefixes the image, nil is returned.
func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
config, err := getConfig(ctx)
if err != nil {
return nil, err
}
reg := Registry{}
prefixLen := 0
for _, r := range config.Registries {
if refMatchesPrefix(ref, r.Prefix) {
length := len(r.Prefix)
if length > prefixLen {
reg = r
prefixLen = length
}
}
}
if prefixLen != 0 {
return &reg, nil
}
return nil, nil
}
// Loads the registry configuration file from the filesystem and then unmarshals
// it. Returns the unmarshalled object.
func loadRegistryConf(configPath string) (*tomlConfig, error) {
config := &tomlConfig{}
configBytes, err := ioutil.ReadFile(configPath)
if err != nil {
return nil, err
}
err = toml.Unmarshal(configBytes, &config)
return config, err
}
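
For illustration (not part of this commit), a sketch of how the pieces above compose, using the pre-v5 API and a hypothetical config path:

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	ctx := &types.SystemContext{
		SystemRegistriesConfPath: "/tmp/registries.conf", // hypothetical test config
	}
	ref, err := reference.ParseNamed("example.com/bar/myimage:latest")
	if err != nil {
		panic(err)
	}
	// Longest-prefix match over all configured registries.
	reg, err := sysregistriesv2.FindRegistry(ctx, ref.Name())
	if err != nil || reg == nil {
		panic(fmt.Sprintf("no registry matched: %v", err))
	}
	// Mirrors come first, the primary endpoint last, each with a rewritten reference.
	sources, err := reg.PullSourcesFromReference(ref)
	if err != nil {
		panic(err)
	}
	for _, s := range sources {
		fmt.Println(s.Endpoint.Location, "->", s.Reference.String())
	}
}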

View File

@@ -104,7 +104,7 @@ func splitDockerDomain(name string) (domain, remainder string) {
}
// familiarizeName returns a shortened version of the name familiar
// to to the Docker UI. Familiar names have the default domain
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".

View File

@@ -3,13 +3,13 @@
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// path-component := alphanumeric [separator alphanumeric]*
// alphanumeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
@@ -175,7 +175,7 @@ func splitDomain(name string) (string, string) {
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
// Deprecated: Use Domain or Path
func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path()

View File

@@ -0,0 +1,6 @@
package reference
// Return true if the specified string fully matches `IdentifierRegexp`.
func IsFullIdentifier(s string) bool {
return anchoredIdentifierRegexp.MatchString(s)
}
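
For illustration: a full 64-character hex image ID matches, while a truncated prefix does not (prefixes are covered by ShortIdentifierRegexp instead). A minimal sketch:

package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	full := strings.Repeat("0", 64)
	fmt.Println(reference.IsFullIdentifier(full))      // true
	fmt.Println(reference.IsFullIdentifier(full[:12])) // false: a prefix is not a full identifier
}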

View File

@@ -0,0 +1,156 @@
package reference
import (
"regexp"
"strings"
storageRegexp "github.com/containers/storage/pkg/regexp"
)
const (
// alphaNumeric defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumeric = `[a-z0-9]+`
// separator defines the separators allowed to be embedded in name
// components. This allows one period, one or two underscores, and multiple
// dashes. Repeated dashes and underscores are intentionally treated
// differently. In order to support valid hostnames as name components,
// support for repeated dashes was added. Additionally, a double underscore
// is now allowed as a separator to loosen the restriction for previously
// supported names.
separator = `(?:[._]|__|[-]*)`
// domainComponent restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp
// and followed by an optional port.
domainComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
// The string counterpart for TagRegexp.
tag = `[\w][\w.-]{0,127}`
// The string counterpart for DigestRegexp.
digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
// The string counterpart for IdentifierRegexp.
identifier = `([a-f0-9]{64})`
// The string counterpart for ShortIdentifierRegexp.
shortIdentifier = `([a-f0-9]{6,64})`
)
var (
// nameComponent restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscores, and multiple dashes.
nameComponent = expression(
alphaNumeric,
optional(repeated(separator, alphaNumeric)))
domain = expression(
domainComponent,
optional(repeated(literal(`.`), domainComponent)),
optional(literal(`:`), `[0-9]+`))
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
DomainRegexp = re(domain)
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = re(tag)
anchoredTag = anchored(tag)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = storageRegexp.Delayed(anchoredTag)
// DigestRegexp matches valid digests.
DigestRegexp = re(digestPat)
anchoredDigest = anchored(digestPat)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = storageRegexp.Delayed(anchoredDigest)
namePat = expression(
optional(domain, literal(`/`)),
nameComponent,
optional(repeated(literal(`/`), nameComponent)))
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = re(namePat)
anchoredName = anchored(
optional(capture(domain), literal(`/`)),
capture(nameComponent,
optional(repeated(literal(`/`), nameComponent))))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = storageRegexp.Delayed(anchoredName)
referencePat = anchored(capture(namePat),
optional(literal(":"), capture(tag)),
optional(literal("@"), capture(digestPat)))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = re(referencePat)
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = re(identifier)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = re(shortIdentifier)
anchoredIdentifier = anchored(identifier)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = storageRegexp.Delayed(anchoredIdentifier)
)
// re compiles the string to a regular expression.
var re = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) string {
return regexp.QuoteMeta(s)
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...string) string {
return strings.Join(res, "")
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...string) string {
return group(expression(res...)) + `?`
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...string) string {
return group(expression(res...)) + `+`
}
// group wraps the regexp in a non-capturing group.
func group(res ...string) string {
return `(?:` + expression(res...) + `)`
}
// capture wraps the expression in a capturing group.
func capture(res ...string) string {
return `(` + expression(res...) + `)`
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...string) string {
return `^` + expression(res...) + `$`
}
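
The helpers above now compose plain strings, so each pattern is assembled once and only the exported regexps are compiled eagerly; the anchored internal variants go through containers/storage's Delayed wrapper, which defers compilation until first use. A standalone sketch of the same composition idea, with hypothetical local names:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Build the pattern as strings (mirroring expression/optional/repeated),
	// then compile exactly once at the end.
	alphaNumeric := `[a-z0-9]+`
	separator := `(?:[._]|__|[-]*)`
	nameComponent := alphaNumeric + `(?:(?:` + separator + alphaNumeric + `)+)?`
	re := regexp.MustCompile(`^` + nameComponent + `$`)
	fmt.Println(re.MatchString("foo_bar-baz")) // true
	fmt.Println(re.MatchString("Foo"))         // false: upper case is not allowed
}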

View File

@@ -0,0 +1,34 @@
package multierr
import (
"fmt"
"strings"
)
// Format creates an error value from the input array (which should not be empty).
// If the input contains a single error value, it is returned as is.
// If there are multiple, they are formatted as a multi-error (with Unwrap() []error) with the provided initial, separator, and ending strings.
//
// Typical usage:
//
// var errs []error
// // …
// errs = append(errs, …)
// // …
// if errs != nil { return multierr.Format("Failures doing $FOO", "\n* ", "", errs)}
func Format(first, middle, last string, errs []error) error {
switch len(errs) {
case 0:
return fmt.Errorf("internal error: multierr.Format called with 0 errors")
case 1:
return errs[0]
default:
// We have to do this — and this function only really exists — because fmt.Errorf(format, errs...) is invalid:
// []error is not a valid parameter to a function expecting []any
anyErrs := make([]any, 0, len(errs))
for _, e := range errs {
anyErrs = append(anyErrs, e)
}
return fmt.Errorf(first+"%w"+strings.Repeat(middle+"%w", len(errs)-1)+last, anyErrs...)
}
}
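
Because this sits under internal/, only containers/image itself can import it; a standalone sketch of the %w-joining trick Format relies on (Go 1.20+ for multiple %w verbs):

package main

import (
	"errors"
	"fmt"
	"strings"
)

func main() {
	errs := []error{errors.New("first"), errors.New("second"), errors.New("third")}
	// fmt.Errorf(format, errs...) would not compile: []error is not []any.
	// Widen element by element, exactly as Format does.
	anyErrs := make([]any, 0, len(errs))
	for _, e := range errs {
		anyErrs = append(anyErrs, e)
	}
	err := fmt.Errorf("failures:"+strings.Repeat("\n* %w", len(errs)), anyErrs...)
	fmt.Println(err)
	fmt.Println(errors.Is(err, errs[1])) // true: %w keeps the chain intact
}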

View File

@@ -0,0 +1,25 @@
package rootless
import (
"os"
"strconv"
)
// GetRootlessEUID returns the UID of the current user (in the parent userNS, if any)
//
// Podman and similar software, in “rootless” configuration, when run as a non-root
// user, very early switches to a user namespace, where Geteuid() == 0 (but does not
// switch to a limited mount namespace); so, code relying on Geteuid() would use
// system-wide paths in e.g. /var, when the user is actually not privileged to write to
// them, and expects state to be stored in the home directory.
//
// If Podman is setting up such a user namespace, it records the original UID in an
// environment variable, allowing us to make choices based on the actual user's identity.
func GetRootlessEUID() int {
euidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
if euidEnv != "" {
euid, _ := strconv.Atoi(euidEnv)
return euid
}
return os.Geteuid()
}
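
A sketch of how a caller might branch on the effective identity; the environment variable name is the real one checked above, but the path policy below is hypothetical:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
)

// rootlessEUID mirrors GetRootlessEUID: prefer the UID Podman recorded
// before entering the user namespace, falling back to the kernel's view.
func rootlessEUID() int {
	if v := os.Getenv("_CONTAINERS_ROOTLESS_UID"); v != "" {
		if uid, err := strconv.Atoi(v); err == nil {
			return uid
		}
	}
	return os.Geteuid()
}

func main() {
	// Hypothetical policy: root state under /var, rootless state in $HOME.
	if rootlessEUID() == 0 {
		fmt.Println("/var/lib/containers")
	} else {
		home, _ := os.UserHomeDir()
		fmt.Println(filepath.Join(home, ".local/share/containers"))
	}
}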

View File

@@ -0,0 +1,71 @@
package internal
import "io"
// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
// The caller must call Close() on the stream (even if the input stream does not need closing!).
type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error)
// DecompressorFunc returns the decompressed stream, given a compressed stream.
// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
// Algorithm is a compression algorithm that can be used for CompressStream.
type Algorithm struct {
name string
baseVariantName string
prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
decompressor DecompressorFunc
compressor CompressorFunc
}
// NewAlgorithm creates an Algorithm instance.
// nontrivialBaseVariantName is typically "".
// This function exists so that Algorithm instances can only be created by code that
// is allowed to import this internal subpackage.
func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
baseVariantName := name
if nontrivialBaseVariantName != "" {
baseVariantName = nontrivialBaseVariantName
}
return Algorithm{
name: name,
baseVariantName: baseVariantName,
prefix: prefix,
decompressor: decompressor,
compressor: compressor,
}
}
// Name returns the name for the compression algorithm.
func (c Algorithm) Name() string {
return c.name
}
// BaseVariantName returns the name of the “base variant” of the compression algorithm.
// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”).
// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data.
func (c Algorithm) BaseVariantName() string {
return c.baseVariantName
}
// AlgorithmCompressor returns the compressor field of algo.
// This is a function instead of a public method so that it is only callable by code
// that is allowed to import this internal subpackage.
func AlgorithmCompressor(algo Algorithm) CompressorFunc {
return algo.compressor
}
// AlgorithmDecompressor returns the decompressor field of algo.
// This is a function instead of a public method so that it is only callable by code
// that is allowed to import this internal subpackage.
func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
return algo.decompressor
}
// AlgorithmPrefix returns the prefix field of algo.
// This is a function instead of a public method so that it is only callable by code
// that is allowed to import this internal subpackage.
func AlgorithmPrefix(algo Algorithm) []byte {
return algo.prefix
}
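
Only packages inside containers/image can import this internal subpackage; a hedged sketch of how a sibling implementation package might wire compress/gzip into an Algorithm value (package and variable names are illustrative):

// Illustrative only: would have to live inside the containers/image module.
package gzipimpl

import (
	"compress/gzip"
	"io"

	"github.com/containers/image/v5/pkg/compression/internal"
)

func decompressor(r io.Reader) (io.ReadCloser, error) {
	return gzip.NewReader(r)
}

func compressor(w io.Writer, _ map[string]string, level *int) (io.WriteCloser, error) {
	if level != nil {
		return gzip.NewWriterLevel(w, *level)
	}
	return gzip.NewWriter(w), nil
}

// Gzip streams begin with the magic bytes 0x1f 0x8b, enabling prefix detection.
var Gzip = internal.NewAlgorithm("gzip", "", []byte{0x1f, 0x8b}, decompressor, compressor)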

View File

@@ -0,0 +1,41 @@
package types
import (
"github.com/containers/image/v5/pkg/compression/internal"
)
// DecompressorFunc returns the decompressed stream, given a compressed stream.
// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
type DecompressorFunc = internal.DecompressorFunc
// Algorithm is a compression algorithm provided and supported by pkg/compression.
// It can't be supplied from the outside.
type Algorithm = internal.Algorithm
const (
// GzipAlgorithmName is the name used by pkg/compression.Gzip.
// NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
GzipAlgorithmName = "gzip"
// Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
// NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
Bzip2AlgorithmName = "bzip2"
// XzAlgorithmName is the name used by pkg/compression.Xz.
// NOTE: Importing only this /types package does not inherently guarantee an Xz algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
XzAlgorithmName = "Xz"
// ZstdAlgorithmName is the name used by pkg/compression.Zstd.
// NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
ZstdAlgorithmName = "zstd"
// ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
// NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
// will actually be available. (In fact it is intended for this types package not to depend
// on any of the implementations.)
ZstdChunkedAlgorithmName = "zstd:chunked"
)
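
These names are meant to be stable identifiers, e.g. for matching a configured compression format without hard-coding literals at every call site. A small hedged sketch:

package main

import (
	"fmt"

	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
)

func main() {
	want := "zstd:chunked" // hypothetical config value
	switch want {
	case compressiontypes.ZstdAlgorithmName:
		fmt.Println("plain zstd")
	case compressiontypes.ZstdChunkedAlgorithmName:
		fmt.Println("zstd:chunked data is also valid zstd data")
	default:
		fmt.Println("unrecognized:", want)
	}
}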

View File

@@ -0,0 +1,12 @@
//go:build !freebsd
// +build !freebsd
package sysregistriesv2
// builtinRegistriesConfPath is the path to the registry configuration file.
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
// builtinRegistriesConfDirPath is the path to the registry configuration directory.
// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"

View File

@@ -0,0 +1,12 @@
//go:build freebsd
// +build freebsd
package sysregistriesv2
// builtinRegistriesConfPath is the path to the registry configuration file.
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/usr/local/etc/containers/registries.conf"
// builtinRegistriesConfDirPath is the path to the registry configuration directory.
// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
const builtinRegistriesConfDirPath = "/usr/local/etc/containers/registries.conf.d"

View File

@@ -0,0 +1,347 @@
package sysregistriesv2
import (
"fmt"
"maps"
"os"
"path/filepath"
"reflect"
"strings"
"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
"github.com/sirupsen/logrus"
)
// defaultShortNameMode is the default mode of registries.conf files if the
// corresponding field is left empty.
const defaultShortNameMode = types.ShortNameModePermissive
// userShortNamesFile is the user-specific config file to store aliases.
var userShortNamesFile = filepath.FromSlash("containers/short-name-aliases.conf")
// shortNameAliasesConfPath returns the path to the machine-generated
// short-name-aliases.conf file.
func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) {
if ctx != nil && len(ctx.UserShortNameAliasConfPath) > 0 {
return ctx.UserShortNameAliasConfPath, nil
}
if rootless.GetRootlessEUID() == 0 {
// Root user or in a non-conforming user NS
return filepath.Join("/var/cache", userShortNamesFile), nil
}
// Rootless user
cacheRoot, err := homedir.GetCacheHome()
if err != nil {
return "", err
}
return filepath.Join(cacheRoot, userShortNamesFile), nil
}
// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the
// software-maintained `userShortNamesFile`.
type shortNameAliasConf struct {
// A map for aliasing short names to their fully-qualified image
// reference counterparts.
// Note that Aliases is niled after being loaded from a file.
Aliases map[string]string `toml:"aliases"`
// If you add any field, make sure to update nonempty() below.
}
// nonempty returns true if config contains at least one configuration entry.
func (c *shortNameAliasConf) nonempty() bool {
copy := *c // A shallow copy
if copy.Aliases != nil && len(copy.Aliases) == 0 {
copy.Aliases = nil
}
return !reflect.DeepEqual(copy, shortNameAliasConf{})
}
// alias combines the parsed value of an alias with the config file it has been
// specified in. The config file is crucial for an improved user experience
// such that users are able to resolve potential pull errors.
type alias struct {
// The parsed value of an alias. May be nil if set to "" in a config.
value reference.Named
// The config file the alias originates from.
configOrigin string
}
// shortNameAliasCache is the result of parsing shortNameAliasConf,
// pre-processed for faster usage.
type shortNameAliasCache struct {
// Note that an alias value may be nil iff it's set as an empty string
// in the config.
namedAliases map[string]alias
}
// ResolveShortNameAlias performs an alias resolution of the specified name.
// The user-specific short-name-aliases.conf has precedence over aliases in the
// assembled registries.conf. It returns the possibly resolved alias or nil, a
// human-readable description of the config where the alias is specified, and
// an error. The origin of the config file is crucial for an improved user
// experience such that users are able to resolve potential pull errors.
// Almost all callers should use pkg/shortnames instead.
//
// Note that it's the caller's responsibility to pass only a repository
// (reference.IsNameOnly) as the short name.
func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Named, string, error) {
if err := validateShortName(name); err != nil {
return nil, "", err
}
confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
if err != nil {
return nil, "", err
}
// Acquire the lock as a reader to allow for multiple routines in the
// same process space to read simultaneously.
lock.RLock()
defer lock.Unlock()
_, aliasCache, err := loadShortNameAliasConf(confPath)
if err != nil {
return nil, "", err
}
// First look up the short-name-aliases.conf. Note that a value may be
// nil iff it's set as an empty string in the config.
alias, resolved := aliasCache.namedAliases[name]
if resolved {
return alias.value, alias.configOrigin, nil
}
config, err := getConfig(ctx)
if err != nil {
return nil, "", err
}
alias, resolved = config.aliasCache.namedAliases[name]
if resolved {
return alias.value, alias.configOrigin, nil
}
return nil, "", nil
}
// editShortNameAlias loads the aliases.conf file and changes it. If value is
// set, it adds the name-value pair as a new alias. Otherwise, it will remove
// name from the config.
func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error {
if err := validateShortName(name); err != nil {
return err
}
if value != nil {
if _, err := parseShortNameValue(*value); err != nil {
return err
}
}
confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
if err != nil {
return err
}
// Acquire the lock as a writer to prevent data corruption.
lock.Lock()
defer lock.Unlock()
// Load the short-name-alias.conf, add the specified name-value pair,
// and write it back to the file.
conf, _, err := loadShortNameAliasConf(confPath)
if err != nil {
return err
}
if conf.Aliases == nil { // Ensure we have a map to update.
conf.Aliases = make(map[string]string)
}
if value != nil {
conf.Aliases[name] = *value
} else {
// If the name does not exist, throw an error.
if _, exists := conf.Aliases[name]; !exists {
return fmt.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
}
delete(conf.Aliases, name)
}
f, err := os.OpenFile(confPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer f.Close()
encoder := toml.NewEncoder(f)
return encoder.Encode(conf)
}
// AddShortNameAlias adds the specified name-value pair as a new alias to the
// user-specific aliases.conf. It may override an existing alias for `name`.
//
// Note that it's the caller's responsibility to pass only a repository
// (reference.IsNameOnly) as the short name.
func AddShortNameAlias(ctx *types.SystemContext, name string, value string) error {
return editShortNameAlias(ctx, name, &value)
}
// RemoveShortNameAlias clears the alias for the specified name. It throws an
// error in case name does not exist in the machine-generated
// short-name-alias.conf. In such case, the alias must be specified in one of
// the registries.conf files, which is the users' responsibility.
//
// Note that it's the caller's responsibility to pass only a repository
// (reference.IsNameOnly) as the short name.
func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
return editShortNameAlias(ctx, name, nil)
}
// parseShortNameValue parses the specified alias into a reference.Named. The alias is
// expected to not be tagged or carry a digest and *must* include a
// domain/registry.
//
// Note that the returned reference is always normalized.
func parseShortNameValue(alias string) (reference.Named, error) {
ref, err := reference.Parse(alias)
if err != nil {
return nil, fmt.Errorf("parsing alias %q: %w", alias, err)
}
if _, ok := ref.(reference.Digested); ok {
return nil, fmt.Errorf("invalid alias %q: must not contain digest", alias)
}
if _, ok := ref.(reference.Tagged); ok {
return nil, fmt.Errorf("invalid alias %q: must not contain tag", alias)
}
named, ok := ref.(reference.Named)
if !ok {
return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
}
registry := reference.Domain(named)
if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
}
// A final parse to make sure that docker.io references are correctly
// normalized (e.g., docker.io/alpine to docker.io/library/alpine).
named, err = reference.ParseNormalizedNamed(alias)
return named, err
}
// validateShortName parses the specified `name` of an alias (i.e., the left-hand
// side) and checks if it's a short name and does not include a tag or digest.
func validateShortName(name string) error {
repo, err := reference.Parse(name)
if err != nil {
return fmt.Errorf("cannot parse short name: %q: %w", name, err)
}
if _, ok := repo.(reference.Digested); ok {
return fmt.Errorf("invalid short name %q: must not contain digest", name)
}
if _, ok := repo.(reference.Tagged); ok {
return fmt.Errorf("invalid short name %q: must not contain tag", name)
}
named, ok := repo.(reference.Named)
if !ok {
return fmt.Errorf("invalid short name %q: no name", name)
}
registry := reference.Domain(named)
if strings.ContainsAny(registry, ".:") || registry == "localhost" {
return fmt.Errorf("invalid short name %q: must not contain registry", name)
}
return nil
}
// newShortNameAliasCache parses shortNameAliasConf and returns the corresponding internal
// representation.
func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAliasCache, error) {
res := shortNameAliasCache{
namedAliases: make(map[string]alias),
}
errs := []error{}
for name, value := range conf.Aliases {
if err := validateShortName(name); err != nil {
errs = append(errs, err)
}
// Empty right-hand side values in config files make it possible to reset
// an alias set in a previously loaded config. This way, drop-in
// config files from registries.conf.d can reset potentially
// misconfigured aliases.
if value == "" {
res.namedAliases[name] = alias{nil, path}
continue
}
named, err := parseShortNameValue(value)
if err != nil {
// We want to report *all* malformed entries to avoid a
// whack-a-mole for the user.
errs = append(errs, err)
} else {
res.namedAliases[name] = alias{named, path}
}
}
if len(errs) > 0 {
return nil, multierr.Format("", "\n", "", errs)
}
return &res, nil
}
// updateWithConfigurationFrom updates c with configuration from updates.
// In case of conflict, updates is preferred.
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
maps.Copy(c.namedAliases, updates.namedAliases)
}
func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
conf := shortNameAliasConf{}
meta, err := toml.DecodeFile(confPath, &conf)
if err != nil && !os.IsNotExist(err) {
// It's okay if the config doesn't exist. Other errors are not.
return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
}
if keys := meta.Undecoded(); len(keys) > 0 {
logrus.Debugf("Failed to decode keys %q from %q", keys, confPath)
}
// Even if we don't always need the cache, building it validates the machine-generated config. The
// file could still be corrupted by another process or user.
cache, err := newShortNameAliasCache(confPath, &conf)
if err != nil {
return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
}
return &conf, cache, nil
}
func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, *lockfile.LockFile, error) {
shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx)
if err != nil {
return "", nil, err
}
// Make sure the path to the file exists.
if err := os.MkdirAll(filepath.Dir(shortNameAliasesConfPath), 0700); err != nil {
return "", nil, err
}
lockPath := shortNameAliasesConfPath + ".lock"
locker, err := lockfile.GetLockFile(lockPath)
return shortNameAliasesConfPath, locker, err
}
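
For illustration (not part of this commit), a sketch of the exported alias API, using a hypothetical user-specific config path:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := &types.SystemContext{
		// Hypothetical path; by default the machine-generated file under
		// the user's cache directory is used.
		UserShortNameAliasConfPath: "/tmp/short-name-aliases.conf",
	}
	// Record that the short name "ubi" resolves to a fully-qualified reference.
	if err := sysregistriesv2.AddShortNameAlias(ctx, "ubi", "registry.access.redhat.com/ubi9"); err != nil {
		panic(err)
	}
	named, origin, err := sysregistriesv2.ResolveShortNameAlias(ctx, "ubi")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String(), "from", origin)
}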

File diff suppressed because it is too large

View File

@@ -5,12 +5,13 @@ import (
"io"
"time"
"github.com/containers/image/docker/reference"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/containers/image/v5/docker/reference"
compression "github.com/containers/image/v5/pkg/compression/types"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ImageTransport is a top-level namespace for ways to to store/load an image.
// ImageTransport is a top-level namespace for ways to store/load an image.
// It should generally correspond to ImageSource/ImageDestination implementations.
//
// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
@@ -47,7 +48,7 @@ type ImageReference interface {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
StringWithinTransport() string
@@ -90,6 +91,32 @@ type ImageReference interface {
DeleteImage(ctx context.Context, sys *SystemContext) error
}
// LayerCompression indicates if layers must be compressed, decompressed or preserved
type LayerCompression int
const (
// PreserveOriginal indicates the layer must be preserved, ie
// no compression or decompression.
PreserveOriginal LayerCompression = iota
// Decompress indicates the layer must be decompressed
Decompress
// Compress indicates the layer must be compressed
Compress
)
// LayerCrypto indicates if layers have been encrypted or decrypted or none
type LayerCrypto int
const (
// PreserveOriginalCrypto indicates the layer must be preserved, ie
// no encryption/decryption
PreserveOriginalCrypto LayerCrypto = iota
// Encrypt indicates the layer is encrypted
Encrypt
// Decrypt indicates the layer is decrypted
Decrypt
)
// BlobInfo collects known information about a blob (layer/config).
// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
type BlobInfo struct {
@@ -98,10 +125,37 @@ type BlobInfo struct {
URLs []string
Annotations map[string]string
MediaType string
// NOTE: The following fields contain desired _edits_ to blob infos.
// Conceptually they don't belong in the BlobInfo object at all;
// the edits should be provided specifically as parameters to the edit implementation.
// We can't remove the fields without breaking compatibility, but don't
// add any more.
// CompressionOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer's "compressed or not" should be preserved,
// possibly while changing the compression algorithm from one to another,
// or if it should be changed to compressed or decompressed.
// The field defaults to preserve the original layer's compressedness.
// TODO: To remove together with CryptoOperation in re-design to remove
// field out of BlobInfo.
CompressionOperation LayerCompression
// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
// set when `CompressionOperation == Compress` and MAY be set when
// `CompressionOperation == PreserveOriginal` and the compression type is
// being changed for an already-compressed layer.
CompressionAlgorithm *compression.Algorithm
// CryptoOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer was encrypted/decrypted
// TODO: To remove together with CompressionOperation in re-design to
// remove field out of BlobInfo.
CryptoOperation LayerCrypto
// Before adding any fields to this struct, read the NOTE above.
}
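
For illustration (not part of this diff): how a caller might populate the edit fields above to request zstd recompression; compression.Zstd is the algorithm value exported by pkg/compression, and the digest is a placeholder:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	zstd := compression.Zstd
	info := types.BlobInfo{
		Digest:               digest.FromString("example layer"), // placeholder digest
		Size:                 -1,                                 // unknown size is allowed
		MediaType:            "application/vnd.oci.image.layer.v1.tar+gzip",
		CompressionOperation: types.Compress,
		CompressionAlgorithm: &zstd,
		CryptoOperation:      types.PreserveOriginalCrypto,
	}
	fmt.Println(info.Digest, info.CompressionOperation == types.Compress)
}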
// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data aboud blobs keyed by (scope, digest).
// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data about blobs keyed by (scope, digest).
// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
//
// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
@@ -128,30 +182,34 @@ type BICReplacementCandidate struct {
Location BICLocationReference
}
// BlobInfoCache records data useful for reusing blobs, or substituing equivalent ones, to avoid unnecessary blob copies.
// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
//
// It records two kinds of data:
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompresssion),
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
//
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
//
// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
// compress/decompress blobs for their own purposes.
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
//
// - Known blob locations, managed by individual transports:
// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
// recording transport-specific information that allows the transport to reuse the blob in the future;
// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
// compress/decompress blobs for their own purposes.
//
// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
// can be directly reused within a registry, or mounted across registries within a registry server.)
// - Known blob locations, managed by individual transports:
// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
// recording transport-specific information that allows the transport to reuse the blob in the future;
// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
//
// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
// can be directly reused within a registry, or mounted across registries within a registry server.)
//
// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
// users of the cahce should just fall back to copying the blobs the usual way.
// users of the cache should just fall back to copying the blobs the usual way.
//
// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
type BlobInfoCache interface {
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
@@ -170,7 +228,7 @@ type BlobInfoCache interface {
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
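// An illustrative sketch of how a TryReusingBlob implementation might consult
// the cache; blobStillExists is a hypothetical helper, not part of this API:
//
//	func findReusableBlob(cache BlobInfoCache, transport ImageTransport, scope BICTransportScope, d digest.Digest) (BICReplacementCandidate, bool) {
//		// Ask for known locations of d or, with canSubstitute=true, of blobs with the same uncompressed digest.
//		for _, candidate := range cache.CandidateLocations(transport, scope, d, true) {
//			if blobStillExists(candidate) { // hypothetical: recorded locations are not guaranteed to still exist
//				return candidate, true
//			}
//		}
//		return BICReplacementCandidate{}, false
//	}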
@@ -205,25 +263,17 @@ type ImageSource interface {
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
// to read the image's layers.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error)
}
// LayerCompression indicates if layers must be compressed, decompressed or preserved
type LayerCompression int
const (
// PreserveOriginal indicates the layer must be preserved, i.e.
// no compression or decompression.
PreserveOriginal LayerCompression = iota
// Decompress indicates the layer must be decompressed
Decompress
// Compress indicates the layer must be compressed
Compress
)
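// An illustrative sketch of how copy code might act on a LayerCompression
// value; compressStream and decompressStream are hypothetical helpers:
//
//	switch op {
//	case PreserveOriginal:
//		// pass the layer stream through unchanged
//	case Decompress:
//		stream = decompressStream(stream)
//	case Compress:
//		stream = compressStream(stream)
//	}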
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
//
// There is a specific required order for some of the calls:
@@ -250,7 +300,7 @@ type ImageDestination interface {
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
AcceptsForeignLayerURLs() bool
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
MustMatchRuntimeOS() bool
// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
@@ -258,7 +308,7 @@ type ImageDestination interface {
IgnoresEmbeddedDockerReference() bool
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
@@ -272,21 +322,34 @@ type ImageDestination interface {
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
// PutManifest writes manifest to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
// by `manifest.Digest()`.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
// while possibly accepting a different manifest type, the returned error must be a ManifestTypeRejectedError.
PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
// PutSignatures writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
// MUST be called after PutManifest (signatures may reference manifest contents).
PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
}
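// An illustrative sketch of the required call order described above, assuming
// all layer blobs were already written via PutBlob/TryReusingBlob and that
// manifestBytes, sigs and unparsedToplevel are provided by the copy code:
//
//	if err := dest.PutManifest(ctx, manifestBytes, nil); err != nil {
//		return err
//	}
//	if err := dest.PutSignatures(ctx, sigs, nil); err != nil { // after PutManifest
//		return err
//	}
//	return dest.Commit(ctx, unparsedToplevel)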
// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
@@ -355,7 +418,19 @@ type Image interface {
// UpdatedImage returns a types.Image modified according to options.
// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if
// manifests of type options.ManifestMIMEType cannot include layers that are compressed
// in accordance with the CompressionOperation and CompressionAlgorithm specified in one
// or more options.LayerInfos items, though retrying with a different
// options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm
// values might succeed.
UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
// SupportsEncryption returns an indicator that the image supports encryption
//
// Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
// the process of updating a manifest between different manifest types was to update then convert.
// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
SupportsEncryption(ctx context.Context) bool
// Size returns an approximation of the amount of disk space which is consumed by the image in its current
// location. If the size is not known, -1 will be returned.
Size() (int64, error)
@@ -370,7 +445,7 @@ type ImageCloser interface {
Close() error
}
// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
type ManifestUpdateOptions struct {
LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
EmbeddedDockerReference reference.Named
@@ -382,7 +457,7 @@ type ManifestUpdateOptions struct {
// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
// only to make writing struct literals possible.
type ManifestUpdateInformation struct {
Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
}
@@ -396,9 +471,20 @@ type ImageInspectInfo struct {
DockerVersion string
Labels map[string]string
Architecture string
Variant string
Os string
Layers []string
LayersData []ImageInspectLayer
Env []string
Author string
}
// ImageInspectLayer is a set of metadata describing an image layer's details
type ImageInspectLayer struct {
MIMEType string // "" if unknown.
Digest digest.Digest
Size int64 // -1 if unknown.
Annotations map[string]string
}
// DockerAuthConfig contains authorization information for connecting to a registry.
@@ -406,6 +492,11 @@ type ImageInspectInfo struct {
type DockerAuthConfig struct {
Username string
Password string
// IdentityToken can be used as a refresh_token in place of username and
// password to obtain the bearer/access token in oauth2 flow. If identity
// token is set, password should not be set.
// Ref: https://docs.docker.com/registry/spec/auth/oauth/
IdentityToken string
}
// OptionalBool is a boolean with an additional undefined value, which is meant
@@ -426,12 +517,42 @@ const (
// OptionalBoolFalse. The function is meant to avoid boilerplate code of users.
func NewOptionalBool(b bool) OptionalBool {
o := OptionalBoolFalse
if b {
o = OptionalBoolTrue
}
return o
}
// ShortNameMode defines the mode of short-name resolution.
//
// The use of unqualified-search registries entails an ambiguity, as it is
// unclear from which registry a given image, referenced by a short name, may
// be pulled.
//
// The ShortNameMode type defines how short names should resolve.
type ShortNameMode int
const (
ShortNameModeInvalid ShortNameMode = iota
// Use all configured unqualified-search registries without prompting
// the user.
ShortNameModeDisabled
// If stdout and stdin are a TTY, prompt the user to select a configured
// unqualified-search registry. Otherwise, use all configured
// unqualified-search registries.
//
// Note that if only one unqualified-search registry is set, it will be
// used without prompting.
ShortNameModePermissive
// Always prompt the user to select a configured unqualified-search
// registry. Return an error if stdout or stdin is not a TTY, as
// prompting isn't possible.
//
// Note that if only one unqualified-search registry is set, it will be
// used without prompting.
ShortNameModeEnforcing
)
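// An illustrative sketch: callers opt into a resolution mode through the
// SystemContext.ShortNameMode field defined below:
//
//	mode := ShortNameModePermissive
//	sys := &SystemContext{ShortNameMode: &mode}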
// SystemContext allows parameterizing access to implicitly-accessed resources,
// like configuration files in /etc and users' login state in their home directory.
// Various components can share the same field only if their semantics is exactly
@@ -453,21 +574,46 @@ type SystemContext struct {
RegistriesDirPath string
// Path to the system-wide registries configuration file
SystemRegistriesConfPath string
// If not "", overrides the default path for the authentication file
// Path to the system-wide registries configuration directory
SystemRegistriesConfDirPath string
// Path to the user-specific short-names configuration file
UserShortNameAliasConfPath string
// If set, short-name resolution in pkg/shortnames must follow the specified mode
ShortNameMode *ShortNameMode
// If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and
// short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce
// resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
// specific context.
PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
// If not "", overrides the default path for the registry authentication file, but only new format files
AuthFilePath string
// if not "", overrides the default path for the registry authentication file, but with the legacy format;
// the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
// but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
// in locations other than the home dir; openshift components should then set this field in those cases;
// this field is ignored if `AuthFilePath` is set (we favor the newer format);
// only reading of this data is supported;
LegacyFormatAuthFilePath string
// If set, a path to a Docker-compatible "config.json" file containing credentials; and no other files are processed.
// This must not be set if AuthFilePath is set.
// Only credentials and credential helpers in this file are processed, not any other configuration in this file.
DockerCompatAuthFilePath string
// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
OSChoice string
// If not "", overrides the use of detected ARM platform variant when choosing an image or verifying variant match.
VariantChoice string
// If not "", overrides the system's default directory containing a blob info cache.
BlobInfoCacheDir string
// Additional tags when creating or copying a docker-archive.
DockerArchiveAdditionalTags []reference.NamedTagged
// If not "", overrides the temporary directory to use for storing big files
BigFilesTemporaryDir string
// === OCI.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),
// a client certificate (ending with ".cert") and a client ceritificate key
// a client certificate (ending with ".cert") and a client certificate key
// (ending with ".key") used when downloading OCI image layers.
OCICertPath string
// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
@@ -479,24 +625,35 @@ type SystemContext struct {
// === docker.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),
// a client certificate (ending with ".cert") and a client ceritificate key
// (ending with ".key") used when talking to a Docker Registry.
// a client certificate (ending with ".cert") and a client certificate key
// (ending with ".key") used when talking to a container registry.
DockerCertPath string
// If not "", overrides the systems default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
// Ignored if DockerCertPath is non-empty.
DockerPerHostCertDirPath string
// Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
DockerInsecureSkipTLSVerify OptionalBool
// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
// Ignored if DockerBearerRegistryToken is non-empty.
DockerAuthConfig *DockerAuthConfig
// if not "", the library uses this registry token to authenticate to the registry
DockerBearerRegistryToken string
// if not "", an User-Agent header is added to each request when contacting a registry.
DockerRegistryUserAgent string
// if true, a V1 ping attempt isn't done to give users a better error. Default is false.
// Note that this field is used mainly to integrate containers/image into projectatomic/docker
// in order to not break any existing docker's integration tests.
DockerDisableV1Ping bool
// If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
DockerDisableDestSchema1MIMETypes bool
// If true, the physical pull source of docker transport images is logged at info level
DockerLogMirrorChoice bool
// Directory to use for OSTree temporary files
OSTreeTmpDirPath string
// If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry.
// Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
// when the digest for a blob is unknown.
DockerRegistryPushPrecomputeDigests bool
// === docker/daemon.Transport overrides ===
// A directory containing a CA certificate (ending with ".crt"),
@@ -511,11 +668,50 @@ type SystemContext struct {
// === dir.Transport overrides ===
// DirForceCompress compresses the image layers if set to true
DirForceCompress bool
// DirForceDecompress decompresses the image layers if set to true
DirForceDecompress bool
// CompressionFormat is the format to use for the compression of the blobs
CompressionFormat *compression.Algorithm
// CompressionLevel specifies what compression level is used
CompressionLevel *int
}
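// An illustrative sketch of configuring registry access through SystemContext;
// the field values are examples only:
//
//	sys := &SystemContext{
//		AuthFilePath:                "/run/containers/0/auth.json",
//		ArchitectureChoice:          "arm64",
//		DockerInsecureSkipTLSVerify: NewOptionalBool(false),
//	}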
// ProgressEvent is the type of events a progress reader can produce
// Warning: new event types may be added any time.
type ProgressEvent uint
const (
// ProgressEventNewArtifact will be fired on progress reader setup
ProgressEventNewArtifact ProgressEvent = iota
// ProgressEventRead indicates that the artifact download is currently in
// progress
ProgressEventRead
// ProgressEventDone is fired when the data transfer has been finished for
// the specific artifact
ProgressEventDone
// ProgressEventSkipped is fired when the artifact has been skipped because
// it's already available at the destination
ProgressEventSkipped
)
// ProgressProperties is used to pass information from the copy code to a monitor which
// can use the real-time information to produce output or react to changes.
type ProgressProperties struct {
// The event indicating what happened
Event ProgressEvent
// The artifact which has been updated in this interval
Artifact BlobInfo
// The currently downloaded size in bytes
// Increases from 0 to the final Artifact size
Offset uint64
// The additional offset which has been downloaded inside the last update
// interval. Will be reset after each ProgressEventRead event.
OffsetUpdate uint64
}
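// An illustrative sketch of a monitor consuming ProgressProperties; the
// channel feeding it is an assumption, since this file only defines the types:
//
//	for p := range progressChan {
//		switch p.Event {
//		case ProgressEventRead:
//			fmt.Printf("%s: %d bytes (+%d)\n", p.Artifact.Digest, p.Offset, p.OffsetUpdate)
//		case ProgressEventDone, ProgressEventSkipped:
//			fmt.Printf("%s: finished\n", p.Artifact.Digest)
//		}
//	}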

vendor/github.com/containers/storage/AUTHORS generated vendored Normal file

File diff suppressed because it is too large

vendor/github.com/containers/storage/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/containers/storage/NOTICE generated vendored Normal file

@@ -0,0 +1,19 @@
Docker
Copyright 2012-2016 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.


@@ -0,0 +1,34 @@
//go:build !windows
// +build !windows
package fileutils
import (
"os"
"golang.org/x/sys/unix"
)
// Exists checks whether a file or directory exists at the given path.
// If the path is a symlink, the symlink is followed.
func Exists(path string) error {
// It uses unix.Faccessat which is a faster operation compared to os.Stat for
// simply checking the existence of a file.
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0)
if err != nil {
return &os.PathError{Op: "faccessat", Path: path, Err: err}
}
return nil
}
// Lexists checks whether a file or directory exists at the given path.
// If the path is a symlink, the symlink itself is checked.
func Lexists(path string) error {
// It uses unix.Faccessat which is a faster operation compared to os.Stat for
// simply checking the existence of a file.
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return &os.PathError{Op: "faccessat", Path: path, Err: err}
}
return nil
}
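// An illustrative usage sketch: the *os.PathError returned above wraps the
// unix errno, so it cooperates with errors.Is:
//
//	if err := Exists("/etc/containers/storage.conf"); err != nil {
//		if errors.Is(err, os.ErrNotExist) {
//			// fall back to built-in defaults
//		}
//	}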


@@ -0,0 +1,18 @@
package fileutils
import (
"os"
)
// Exists checks whether a file or directory exists at the given path.
func Exists(path string) error {
_, err := os.Stat(path)
return err
}
// Lexists checks whether a file or directory exists at the given path, without
// resolving symlinks
func Lexists(path string) error {
_, err := os.Lstat(path)
return err
}


@@ -0,0 +1,371 @@
package fileutils
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"text/scanner"
"github.com/sirupsen/logrus"
)
// PatternMatcher allows checking paths against a list of patterns
type PatternMatcher struct {
patterns []*Pattern
exclusions bool
}
// NewPatternMatcher creates a new matcher object for specific patterns that
// can be used later to match paths against the patterns
func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
pm := &PatternMatcher{
patterns: make([]*Pattern, 0, len(patterns)),
}
for _, p := range patterns {
// Eliminate leading and trailing whitespace.
p = strings.TrimSpace(p)
if p == "" {
continue
}
p = filepath.Clean(p)
newp := &Pattern{}
if p[0] == '!' {
if len(p) == 1 {
return nil, errors.New("illegal exclusion pattern: \"!\"")
}
newp.exclusion = true
p = strings.TrimPrefix(filepath.Clean(p[1:]), "/")
pm.exclusions = true
}
// Do some syntax checking on the pattern.
// filepath's Match() has some really weird rules that are inconsistent
// so instead of trying to dup their logic, just call Match() for its
// error state and if there is an error in the pattern return it.
// If this becomes an issue we can remove this since it's really only
// needed in the error (syntax) case - which isn't really critical.
if _, err := filepath.Match(p, "."); err != nil {
return nil, err
}
newp.cleanedPattern = p
newp.dirs = strings.Split(p, string(os.PathSeparator))
pm.patterns = append(pm.patterns, newp)
}
return pm, nil
}
// Matches matches path against all the patterns. Matches is not safe to be
// called concurrently.
//
// Deprecated: Please use the `MatchesResult` method instead.
func (pm *PatternMatcher) Matches(file string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
for _, pattern := range pm.patterns {
negative := false
if pattern.exclusion {
negative = true
}
match, err := pattern.match(file)
if err != nil {
return false, err
}
if match {
matched = !negative
}
}
if matched {
logrus.Debugf("Skipping excluded path: %s", file)
}
return matched, nil
}
type MatchResult struct {
isMatched bool
matches, excludes uint
}
// IsMatched returns true if the overall result is matched
func (m *MatchResult) IsMatched() bool {
return m.isMatched
}
// Matches returns the number of matches of a MatchResult
func (m *MatchResult) Matches() uint {
return m.matches
}
// Excludes returns the number of excludes of a MatchResult
func (m *MatchResult) Excludes() uint {
return m.excludes
}
// MatchesResult verifies the provided filepath against all patterns.
// It returns the `*MatchResult` result for the patterns on success, otherwise
// an error. This method is not safe to be called concurrently.
func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) {
file = filepath.FromSlash(file)
res = &MatchResult{false, 0, 0}
for _, pattern := range pm.patterns {
negative := false
if pattern.exclusion {
negative = true
}
match, err := pattern.match(file)
if err != nil {
return nil, err
}
if match {
res.isMatched = !negative
if negative {
res.excludes++
} else {
res.matches++
}
}
}
if res.matches > 0 {
logrus.Debugf("Skipping excluded path: %s", file)
}
return res, nil
}
// IsMatch verifies the provided filepath against all patterns and returns true
// if it matches. A match is valid if the last match is a positive one.
// It returns an error on failure and is not safe to be called concurrently.
func (pm *PatternMatcher) IsMatch(file string) (matched bool, err error) {
res, err := pm.MatchesResult(file)
if err != nil {
return false, err
}
return res.isMatched, nil
}
// Exclusions returns true if any of the patterns define exclusions
func (pm *PatternMatcher) Exclusions() bool {
return pm.exclusions
}
// Patterns returns array of active patterns
func (pm *PatternMatcher) Patterns() []*Pattern {
return pm.patterns
}
// Pattern defines a single regexp used to filter file paths.
type Pattern struct {
cleanedPattern string
dirs []string
regexp *regexp.Regexp
exclusion bool
}
func (p *Pattern) String() string {
return p.cleanedPattern
}
// Exclusion returns true if this pattern defines exclusion
func (p *Pattern) Exclusion() bool {
return p.exclusion
}
func (p *Pattern) match(path string) (bool, error) {
if p.regexp == nil {
if err := p.compile(); err != nil {
return false, filepath.ErrBadPattern
}
}
b := p.regexp.MatchString(path)
return b, nil
}
func (p *Pattern) compile() error {
regStr := "^"
pattern := p.cleanedPattern
// Go through the pattern and convert it to a regexp.
// We use a scanner so we can support utf-8 chars.
var scan scanner.Scanner
scan.Init(strings.NewReader(pattern))
sl := string(os.PathSeparator)
escSL := sl
const bs = `\`
if sl == bs {
escSL += bs
}
for scan.Peek() != scanner.EOF {
ch := scan.Next()
if ch == '*' {
if scan.Peek() == '*' {
// is some flavor of "**"
scan.Next()
// Treat **/ as ** so eat the "/"
if string(scan.Peek()) == sl {
scan.Next()
}
if scan.Peek() == scanner.EOF {
// is "**EOF" - to align with .gitignore just accept all
regStr += ".*"
} else {
// is "**"
// Note that this allows for any # of /'s (even 0) because
// the .* will eat everything, even /'s
regStr += "(.*" + escSL + ")?"
}
} else {
// is "*" so map it to anything but "/"
regStr += "[^" + escSL + "]*"
}
} else if ch == '?' {
// "?" is any char except "/"
regStr += "[^" + escSL + "]"
} else if ch == '.' || ch == '$' {
// Escape some regexp special chars that have no meaning
// in golang's filepath.Match
regStr += bs + string(ch)
} else if ch == '\\' {
// escape next char.
if sl == bs {
// On windows map "\" to "\\", meaning an escaped backslash,
// and then just continue because filepath.Match on
// Windows doesn't allow escaping at all
regStr += escSL
continue
}
if scan.Peek() != scanner.EOF {
regStr += bs + string(scan.Next())
} else {
return filepath.ErrBadPattern
}
} else {
regStr += string(ch)
}
}
regStr += "(" + escSL + ".*)?$"
re, err := regexp.Compile(regStr)
if err != nil {
return err
}
p.regexp = re
return nil
}
// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
pm, err := NewPatternMatcher(patterns)
if err != nil {
return false, err
}
file = filepath.Clean(file)
if file == "." {
// Don't let them exclude everything, kind of silly.
return false, nil
}
return pm.IsMatch(file)
}
// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
cleanSrc := filepath.Clean(src)
cleanDst := filepath.Clean(dst)
if cleanSrc == cleanDst {
return 0, nil
}
sf, err := os.Open(cleanSrc)
if err != nil {
return 0, err
}
defer sf.Close()
if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
return 0, err
}
df, err := os.Create(cleanDst)
if err != nil {
return 0, err
}
defer df.Close()
return io.Copy(df, sf)
}
// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link must be a directory; otherwise an error is returned.
func ReadSymlinkedDirectory(path string) (string, error) {
var realPath string
var err error
if realPath, err = filepath.Abs(path); err != nil {
return "", fmt.Errorf("unable to get absolute path for %s: %w", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return "", fmt.Errorf("failed to canonicalise path for %s: %w", path, err)
}
realPathInfo, err := os.Stat(realPath)
if err != nil {
return "", fmt.Errorf("failed to stat target '%s' of '%s': %w", realPath, path, err)
}
if !realPathInfo.Mode().IsDir() {
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
}
return realPath, nil
}
// ReadSymlinkedPath returns the target of a symlink.
// The target of the symbolic link may be either a file or a directory.
func ReadSymlinkedPath(path string) (realPath string, err error) {
if realPath, err = filepath.Abs(path); err != nil {
return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err)
}
if err := Exists(realPath); err != nil {
return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err)
}
return realPath, nil
}
// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
if err := Exists(path); err != nil {
if os.IsNotExist(err) {
if isDir {
return os.MkdirAll(path, 0o755)
}
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_CREATE, 0o755)
if err != nil {
return err
}
f.Close()
}
}
return nil
}
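// An illustrative usage sketch of PatternMatcher with .dockerignore-style
// patterns (the paths are examples only):
//
//	pm, err := NewPatternMatcher([]string{"vendor/**", "!vendor/keep"})
//	if err != nil {
//		return err
//	}
//	excluded, _ := pm.IsMatch("vendor/foo/bar.go") // true: matched by "vendor/**"
//	kept, _ := pm.IsMatch("vendor/keep")           // false: re-included by "!vendor/keep"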


@@ -0,0 +1,27 @@
package fileutils
import (
"os"
"os/exec"
"strconv"
"strings"
)
// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
pid := os.Getpid()
cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
output, err := cmd.CombinedOutput()
if err != nil {
return -1
}
outputStr := strings.TrimSpace(string(output))
fds := strings.Split(outputStr, "\n")
return len(fds) - 1
}


@@ -0,0 +1,7 @@
package fileutils
// GetTotalUsedFds Returns the number of used File Descriptors.
// On Solaris these limits are per process and not systemwide
func GetTotalUsedFds() int {
return -1
}


@@ -0,0 +1,22 @@
//go:build linux || freebsd
// +build linux freebsd
package fileutils
import (
"fmt"
"os"
"github.com/sirupsen/logrus"
)
// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
logrus.Errorf("%v", err)
} else {
return len(fds)
}
return -1
}


@@ -0,0 +1,7 @@
package fileutils
// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
return -1
}


@@ -0,0 +1,37 @@
package homedir
import (
"errors"
"os"
"path/filepath"
)
// GetDataHome returns XDG_DATA_HOME.
// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetDataHome() (string, error) {
if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
return xdgDataHome, nil
}
home := Get()
if home == "" {
return "", errors.New("could not get either XDG_DATA_HOME or HOME")
}
return filepath.Join(home, ".local", "share"), nil
}
// GetCacheHome returns XDG_CACHE_HOME.
// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetCacheHome() (string, error) {
if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" {
return xdgCacheHome, nil
}
home := Get()
if home == "" {
return "", errors.New("could not get either XDG_CACHE_HOME or HOME")
}
return filepath.Join(home, ".cache"), nil
}


@@ -0,0 +1,38 @@
//go:build !linux && !darwin && !freebsd && !windows
// +build !linux,!darwin,!freebsd,!windows
package homedir
// Copyright 2013-2018 Docker, Inc.
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
"errors"
"os"
"path/filepath"
)
// GetRuntimeDir is unsupported on non-linux system.
func GetRuntimeDir() (string, error) {
return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
}
// StickRuntimeDirContents is unsupported on non-linux system.
func StickRuntimeDirContents(files []string) ([]string, error) {
return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
}
// GetConfigHome returns XDG_CONFIG_HOME.
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
return xdgConfigHome, nil
}
home := Get()
if home == "" {
return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
}
return filepath.Join(home, ".config"), nil
}


@@ -0,0 +1,183 @@
//go:build !windows
// +build !windows
package homedir
// Copyright 2013-2018 Docker, Inc.
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
)
// Key returns the env var name for the user's home dir based on
// the platform being run on
func Key() string {
return "HOME"
}
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
//
// If linking statically with cgo enabled against glibc, ensure the
// osusergo build tag is used.
//
// If needing to do nss lookups, do not disable cgo or set osusergo.
func Get() string {
homedir, _ := unshare.HomeDir()
return homedir
}
// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
return "~"
}
// StickRuntimeDirContents sets the sticky bit on files that are under
// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
//
// StickRuntimeDirContents returns the slice of files whose sticky bit was set.
// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func StickRuntimeDirContents(files []string) ([]string, error) {
runtimeDir, err := GetRuntimeDir()
if err != nil {
// ignore error if runtimeDir is empty
return nil, nil //nolint: nilerr
}
runtimeDir, err = filepath.Abs(runtimeDir)
if err != nil {
return nil, err
}
var sticked []string
for _, f := range files {
f, err = filepath.Abs(f)
if err != nil {
return sticked, err
}
if strings.HasPrefix(f, runtimeDir+"/") {
if err = stick(f); err != nil {
return sticked, err
}
sticked = append(sticked, f)
}
}
return sticked, nil
}
func stick(f string) error {
st, err := os.Stat(f)
if err != nil {
return err
}
m := st.Mode()
m |= os.ModeSticky
return os.Chmod(f, m)
}
var (
rootlessConfigHomeDirError error
rootlessConfigHomeDirOnce sync.Once
rootlessConfigHomeDir string
rootlessRuntimeDirOnce sync.Once
rootlessRuntimeDir string
)
// isWriteableOnlyByOwner checks that the specified permission mask allows write
// access only to the owner.
func isWriteableOnlyByOwner(perm os.FileMode) bool {
return (perm & 0o722) == 0o700
}
// GetConfigHome returns XDG_CONFIG_HOME.
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
rootlessConfigHomeDirOnce.Do(func() {
cfgHomeDir := os.Getenv("XDG_CONFIG_HOME")
if cfgHomeDir == "" {
home := Get()
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err)
return
}
tmpDir := filepath.Join(resolvedHome, ".config")
_ = os.MkdirAll(tmpDir, 0o700)
st, err := os.Stat(tmpDir)
if err != nil {
rootlessConfigHomeDirError = err
return
} else if int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() {
cfgHomeDir = tmpDir
} else {
rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not owned by the current user", tmpDir)
return
}
}
rootlessConfigHomeDir = cfgHomeDir
})
return rootlessConfigHomeDir, rootlessConfigHomeDirError
}
// GetRuntimeDir returns a directory suitable to store runtime files.
// The function will try to use the XDG_RUNTIME_DIR env variable if it is set.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable
// directory for the current user.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
var rootlessRuntimeDirError error
rootlessRuntimeDirOnce.Do(func() {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir)
return
}
uid := strconv.Itoa(unshare.GetRootlessUID())
if runtimeDir == "" {
tmpDir := filepath.Join("/run", "user", uid)
if err := os.MkdirAll(tmpDir, 0o700); err != nil {
logrus.Debug(err)
}
st, err := os.Lstat(tmpDir)
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
runtimeDir = tmpDir
}
}
if runtimeDir == "" {
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("storage-run-%s", uid))
if err := os.MkdirAll(tmpDir, 0o700); err != nil {
logrus.Debug(err)
}
st, err := os.Lstat(tmpDir)
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
runtimeDir = tmpDir
} else {
rootlessRuntimeDirError = fmt.Errorf("path %q exists and it is not writeable only by the current user", tmpDir)
return
}
}
rootlessRuntimeDir = runtimeDir
})
return rootlessRuntimeDir, rootlessRuntimeDirError
}
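// An illustrative usage sketch: GetRuntimeDir prefers $XDG_RUNTIME_DIR and
// falls back to /run/user/$UID or a directory under os.TempDir():
//
//	dir, err := GetRuntimeDir() // e.g. "/run/user/1000"
//	if err != nil {
//		return err
//	}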


@@ -0,0 +1,61 @@
package homedir
// Copyright 2013-2018 Docker, Inc.
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
"os"
"path/filepath"
)
// Key returns the env var name for the user's home dir based on
// the platform being run on
func Key() string {
return "USERPROFILE"
}
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
func Get() string {
home := os.Getenv(Key())
if home != "" {
return home
}
home, _ = os.UserHomeDir()
return home
}
// GetConfigHome returns the path of the per-user configuration directory,
// placed under the home directory of the current user.
// Returned path should be used with "path/filepath" to form new paths.
func GetConfigHome() (string, error) {
return filepath.Join(Get(), ".config"), nil
}
// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
return "%USERPROFILE%" // be careful while using in format functions
}
// StickRuntimeDirContents is a no-op on Windows
func StickRuntimeDirContents(files []string) ([]string, error) {
return nil, nil
}
// GetRuntimeDir returns a directory suitable to store runtime files.
// On Windows, XDG_RUNTIME_DIR is not used; the directory is derived from the
// data home (see GetDataHome) instead.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
data, err := GetDataHome()
if err != nil {
return "", err
}
runtimeDir := filepath.Join(data, "containers", "storage")
return runtimeDir, nil
}


@@ -0,0 +1,461 @@
package idtools
import (
"bufio"
"errors"
"fmt"
"os"
"os/user"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"github.com/containers/storage/pkg/system"
"github.com/sirupsen/logrus"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
}
type subIDRange struct {
Start int
Length int
}
type ranges []subIDRange
func (e ranges) Len() int { return len(e) }
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
const (
subuidFileName string = "/etc/subuid"
subgidFileName string = "/etc/subgid"
ContainersOverrideXattr = "user.containers.override_stat"
)
// MkdirAllAs creates a directory (including any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
// Deprecated: Use MkdirAllAndChown
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
}
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership
// Deprecated: Use MkdirAndChown with an IDPair
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
}
// MkdirAllAndChown creates a directory (including any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
}
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership
func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
}
// MkdirAllAndChownNew creates a directory (including any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed
func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
}
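// An illustrative usage sketch (the path is an example only): create a
// storage directory owned by the remapped root of an IDMappings:
//
//	root := mappings.RootPair()
//	if err := MkdirAllAndChownNew("/var/lib/containers/storage/overlay", 0o700, root); err != nil {
//		return err
//	}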
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
var uid, gid int
var err error
if len(uidMap) == 1 && uidMap[0].Size == 1 {
uid = uidMap[0].HostID
} else {
uid, err = RawToHost(0, uidMap)
if err != nil {
return -1, -1, err
}
}
if len(gidMap) == 1 && gidMap[0].Size == 1 {
gid = gidMap[0].HostID
} else {
gid, err = RawToHost(0, gidMap)
if err != nil {
return -1, -1, err
}
}
return uid, gid, nil
}
// RawToContainer takes an id mapping, and uses it to translate a host ID to
// the remapped ID. If no map is provided, then the translation assumes a
// 1-to-1 mapping and returns the passed in id.
//
// If you wish to map a (uid,gid) combination you should use the corresponding
// IDMappings methods, which ensure that you are mapping the correct ID against
// the correct mapping.
func RawToContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
for _, m := range idMap {
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
contID := m.ContainerID + (hostID - m.HostID)
return contID, nil
}
}
return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID)
}
// RawToHost takes an id mapping and a remapped ID, and translates the ID to
// the mapped host ID. If no map is provided, then the translation assumes a
// 1-to-1 mapping and returns the passed in id.
//
// If you wish to map a (uid,gid) combination you should use the corresponding
// IDMappings methods, which ensure that you are mapping the correct ID against
// the correct mapping.
func RawToHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
for _, m := range idMap {
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
hostID := m.HostID + (contID - m.ContainerID)
return hostID, nil
}
}
return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}
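// A worked example of the two translations above, using a single range that
// maps container IDs 0-65535 onto host IDs 100000-165535:
//
//	m := []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
//	host, _ := RawToHost(1, m)           // 100001
//	cont, _ := RawToContainer(100001, m) // 1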
// IDPair is a UID and GID pair
type IDPair struct {
UID int
GID int
}
// IDMappings contains the mappings of UIDs and GIDs
type IDMappings struct {
uids []IDMap
gids []IDMap
}
// NewIDMappings takes a requested user and group name and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func NewIDMappings(username, groupname string) (*IDMappings, error) {
subuidRanges, err := readSubuid(username)
if err != nil {
return nil, err
}
subgidRanges, err := readSubgid(groupname)
if err != nil {
return nil, err
}
if len(subuidRanges) == 0 {
return nil, fmt.Errorf("no subuid ranges found for user %q in %s", username, subuidFileName)
}
if len(subgidRanges) == 0 {
return nil, fmt.Errorf("no subgid ranges found for group %q in %s", groupname, subgidFileName)
}
return &IDMappings{
uids: createIDMap(subuidRanges),
gids: createIDMap(subgidRanges),
}, nil
}
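// An illustrative usage sketch (the user and group names are examples only):
//
//	mappings, err := NewIDMappings("alice", "alice")
//	if err != nil {
//		return err
//	}
//	root := mappings.RootPair() // the host IDs that container root maps to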
// NewIDMappingsFromMaps creates a new mapping from two slices
// Deprecated: this is a temporary shim while transitioning to IDMapping
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
return &IDMappings{uids: uids, gids: gids}
}
// RootPair returns a uid and gid pair for the root user. The error is ignored
// because a root user always exists, and the defaults are correct when the uid
// and gid maps are empty.
func (i *IDMappings) RootPair() IDPair {
uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
return IDPair{UID: uid, GID: gid}
}
// ToHost returns the host UID and GID for the container uid, gid.
func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
var err error
var target IDPair
target.UID, err = RawToHost(pair.UID, i.uids)
if err != nil {
return target, err
}
target.GID, err = RawToHost(pair.GID, i.gids)
return target, err
}
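// Illustrative sketch (not part of the vendored source): a round trip through
// IDMappings built from a hypothetical 65536-wide mapping for both UIDs and GIDs.
//
//	m := NewIDMappingsFromMaps(
//		[]IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
//		[]IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
//	)
//	root := m.RootPair()                        // IDPair{UID: 100000, GID: 100000}
//	host, _ := m.ToHost(IDPair{UID: 1, GID: 1}) // IDPair{UID: 100001, GID: 100001}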
var (
overflowUIDOnce sync.Once
overflowGIDOnce sync.Once
overflowUID int
overflowGID int
)
// getOverflowUID returns the UID mapped to the overflow user
func getOverflowUID() int {
overflowUIDOnce.Do(func() {
// 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present
overflowUID = 65534
if content, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
if tmp, err := strconv.Atoi(string(content)); err == nil {
overflowUID = tmp
}
}
})
return overflowUID
}
// getOverflowGID returns the GID mapped to the overflow user
func getOverflowGID() int {
overflowGIDOnce.Do(func() {
// 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
overflowGID = 65534
if content, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
if tmp, err := strconv.Atoi(string(content)); err == nil {
overflowGID = tmp
}
}
})
return overflowGID
}
// ToHostOverflow returns the host UID and GID for the container uid, gid.
// Remapping is only performed if the ids aren't already the remapped root ids
// If the mapping is not possible because the target ID is not mapped into
// the namespace, then the overflow ID is used.
func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) {
var err error
target := i.RootPair()
if pair.UID != target.UID {
target.UID, err = RawToHost(pair.UID, i.uids)
if err != nil {
target.UID = getOverflowUID()
logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID)
}
}
if pair.GID != target.GID {
target.GID, err = RawToHost(pair.GID, i.gids)
if err != nil {
target.GID = getOverflowGID()
logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID)
}
}
return target, nil
}
// ToContainer returns the container UID and GID for the host uid and gid
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
uid, err := RawToContainer(pair.UID, i.uids)
if err != nil {
return -1, -1, err
}
gid, err := RawToContainer(pair.GID, i.gids)
return uid, gid, err
}
// Empty returns true if there are no id mappings
func (i *IDMappings) Empty() bool {
return len(i.uids) == 0 && len(i.gids) == 0
}
// UIDs returns the UID mapping
// TODO: remove this once everything has been refactored to use pairs
func (i *IDMappings) UIDs() []IDMap {
return i.uids
}
// GIDs returns the GID mapping
// TODO: remove this once everything has been refactored to use pairs
func (i *IDMappings) GIDs() []IDMap {
return i.gids
}
func createIDMap(subidRanges ranges) []IDMap {
idMap := []IDMap{}
// sort the ranges by lowest ID first
sort.Sort(subidRanges)
containerID := 0
for _, idrange := range subidRanges {
idMap = append(idMap, IDMap{
ContainerID: containerID,
HostID: idrange.Start,
Size: idrange.Length,
})
containerID = containerID + idrange.Length
}
return idMap
}
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
var (
rangeList ranges
uidstr string
)
if u, err := user.Lookup(username); err == nil {
uidstr = u.Uid
}
subidFile, err := os.Open(path)
if err != nil {
return rangeList, err
}
defer subidFile.Close()
s := bufio.NewScanner(subidFile)
for s.Scan() {
if err := s.Err(); err != nil {
return rangeList, err
}
text := strings.TrimSpace(s.Text())
if text == "" || strings.HasPrefix(text, "#") {
continue
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
return rangeList, fmt.Errorf("cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") {
startid, err := strconv.Atoi(parts[1])
if err != nil {
return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err)
}
length, err := strconv.Atoi(parts[2])
if err != nil {
return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err)
}
rangeList = append(rangeList, subIDRange{startid, length})
}
}
return rangeList, nil
}
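// Illustrative sketch (not part of the vendored source): a hypothetical
// /etc/subuid entry and the range parseSubidFile derives from it. Each line
// has the form name-or-uid:start:length.
//
//	alice:100000:65536
//
// parsed for username "alice" yields ranges{subIDRange{Start: 100000, Length: 65536}}.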
func checkChownErr(err error, name string, uid, gid int) error {
var e *os.PathError
if errors.As(err, &e) && e.Err == syscall.EINVAL {
return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err)
}
return err
}
func SafeChown(name string, uid, gid int) error {
if runtime.GOOS == "darwin" {
var mode uint64 = 0o0700
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
if err == nil {
attrs := strings.Split(string(xstat), ":")
if len(attrs) == 3 {
val, err := strconv.ParseUint(attrs[2], 8, 32)
if err == nil {
mode = val
}
}
}
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
uid = os.Getuid()
gid = os.Getgid()
}
if stat, statErr := system.Stat(name); statErr == nil {
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
return nil
}
}
return checkChownErr(os.Chown(name, uid, gid), name, uid, gid)
}
func SafeLchown(name string, uid, gid int) error {
if runtime.GOOS == "darwin" {
var mode uint64 = 0o0700
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
if err == nil {
attrs := strings.Split(string(xstat), ":")
if len(attrs) == 3 {
val, err := strconv.ParseUint(attrs[2], 8, 32)
if err == nil {
mode = val
}
}
}
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
uid = os.Getuid()
gid = os.Getgid()
}
if stat, statErr := system.Lstat(name); statErr == nil {
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
return nil
}
}
return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid)
}
type sortByHostID []IDMap
func (e sortByHostID) Len() int { return len(e) }
func (e sortByHostID) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e sortByHostID) Less(i, j int) bool { return e[i].HostID < e[j].HostID }
type sortByContainerID []IDMap
func (e sortByContainerID) Len() int { return len(e) }
func (e sortByContainerID) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e sortByContainerID) Less(i, j int) bool { return e[i].ContainerID < e[j].ContainerID }
// IsContiguous checks whether the specified mapping is contiguous, i.e.
// whether it has no holes.
func IsContiguous(mappings []IDMap) bool {
if len(mappings) < 2 {
return true
}
var mh sortByHostID = mappings[:]
sort.Sort(mh)
for i := 1; i < len(mh); i++ {
if mh[i].HostID != mh[i-1].HostID+mh[i-1].Size {
return false
}
}
var mc sortByContainerID = mappings[:]
sort.Sort(mc)
for i := 1; i < len(mc); i++ {
if mc[i].ContainerID != mc[i-1].ContainerID+mc[i-1].Size {
return false
}
}
return true
}
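// Illustrative sketch (not part of the vendored source): IsContiguous accepts
// abutting ranges and rejects holes on either the host or the container side.
//
//	IsContiguous([]IDMap{
//		{ContainerID: 0, HostID: 100000, Size: 1000},
//		{ContainerID: 1000, HostID: 101000, Size: 1000},
//	}) // true: both sides abut exactly
//
//	IsContiguous([]IDMap{
//		{ContainerID: 0, HostID: 100000, Size: 1000},
//		{ContainerID: 2000, HostID: 101000, Size: 1000},
//	}) // false: container IDs 1000..1999 are unmapped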


@@ -0,0 +1,87 @@
//go:build linux && cgo && libsubid
// +build linux,cgo,libsubid
package idtools
import (
"errors"
"os/user"
"unsafe"
)
/*
#cgo LDFLAGS: -l subid
#include <shadow/subid.h>
#include <stdlib.h>
#include <stdio.h>
const char *Prog = "storage";
FILE *shadow_logfd = NULL;
struct subid_range get_range(struct subid_range *ranges, int i)
{
shadow_logfd = stderr;
return ranges[i];
}
#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
# define subid_get_uid_ranges get_subuid_ranges
# define subid_get_gid_ranges get_subgid_ranges
#endif
*/
import "C"
func readSubid(username string, isUser bool) (ranges, error) {
var ret ranges
uidstr := ""
if username == "ALL" {
return nil, errors.New("username ALL not supported")
}
if u, err := user.Lookup(username); err == nil {
uidstr = u.Uid
}
cUsername := C.CString(username)
defer C.free(unsafe.Pointer(cUsername))
cuidstr := C.CString(uidstr)
defer C.free(unsafe.Pointer(cuidstr))
var nRanges C.int
var cRanges *C.struct_subid_range
if isUser {
nRanges = C.subid_get_uid_ranges(cUsername, &cRanges)
if nRanges <= 0 {
nRanges = C.subid_get_uid_ranges(cuidstr, &cRanges)
}
} else {
nRanges = C.subid_get_gid_ranges(cUsername, &cRanges)
if nRanges <= 0 {
nRanges = C.subid_get_gid_ranges(cuidstr, &cRanges)
}
}
if nRanges < 0 {
return nil, errors.New("cannot read subids")
}
defer C.free(unsafe.Pointer(cRanges))
for i := 0; i < int(nRanges); i++ {
r := C.get_range(cRanges, C.int(i))
newRange := subIDRange{
Start: int(r.start),
Length: int(r.count),
}
ret = append(ret, newRange)
}
return ret, nil
}
func readSubuid(username string) (ranges, error) {
return readSubid(username, true)
}
func readSubgid(username string) (ranges, error) {
return readSubid(username, false)
}


@@ -0,0 +1,215 @@
//go:build !windows
// +build !windows
package idtools
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/system"
"github.com/moby/sys/user"
)
var (
entOnce sync.Once
getentCmd string
)
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
st, err := os.Stat(path)
if err != nil && os.IsNotExist(err) {
paths = []string{path}
} else if err == nil {
if !st.IsDir() {
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
if chownExisting {
// short-circuit--we were called with an existing directory and chown was requested
return SafeChown(path, ownerUID, ownerGID)
}
// nothing to do; directory exists and chown was NOT requested
return nil
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
if !filepath.IsAbs(dirPath) {
return fmt.Errorf("path: %s should be absolute", dirPath)
}
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if err := fileutils.Exists(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err := os.MkdirAll(path, mode); err != nil {
return err
}
} else {
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
return err
}
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil {
return err
}
}
return nil
}
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
func CanAccess(path string, pair IDPair) bool {
statInfo, err := system.Stat(path)
if err != nil {
return false
}
fileMode := os.FileMode(statInfo.Mode())
permBits := fileMode.Perm()
return accessible(statInfo.UID() == uint32(pair.UID),
statInfo.GID() == uint32(pair.GID), permBits)
}
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
if isOwner && (perms&0o100 == 0o100) {
return true
}
if isGroup && (perms&0o010 == 0o010) {
return true
}
if perms&0o001 == 0o001 {
return true
}
return false
}
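// Illustrative sketch (not part of the vendored source): how the execute bits
// are consulted for a few hypothetical permission sets.
//
//	accessible(true, false, 0o700)  // true: owner, and owner-execute is set
//	accessible(false, true, 0o750)  // true: group-execute is set
//	accessible(false, false, 0o750) // false: no execute bit for "other"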
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUser(username string) (user.User, error) {
// first try a local system files lookup using existing capabilities
usr, err := user.LookupUser(username)
if err == nil {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
if err != nil {
return user.User{}, err
}
return usr, nil
}
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUID(uid int) (user.User, error) {
// first try a local system files lookup using existing capabilities
usr, err := user.LookupUid(uid)
if err == nil {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
}
func getentUser(args string) (user.User, error) {
reader, err := callGetent(args)
if err != nil {
return user.User{}, err
}
users, err := user.ParsePasswd(reader)
if err != nil {
return user.User{}, err
}
if len(users) == 0 {
return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
}
return users[0], nil
}
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGroup(groupname string) (user.Group, error) {
// first try a local system files lookup using existing capabilities
group, err := user.LookupGroup(groupname)
if err == nil {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
}
// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGID(gid int) (user.Group, error) {
// first try a local system files lookup using existing capabilities
group, err := user.LookupGid(gid)
if err == nil {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
return getentGroup(fmt.Sprintf("%s %d", "group", gid))
}
func getentGroup(args string) (user.Group, error) {
reader, err := callGetent(args)
if err != nil {
return user.Group{}, err
}
groups, err := user.ParseGroup(reader)
if err != nil {
return user.Group{}, err
}
if len(groups) == 0 {
return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
}
return groups[0], nil
}
func callGetent(args string) (io.Reader, error) {
entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
// if no `getent` command on host, can't do anything else
if getentCmd == "" {
return nil, fmt.Errorf("unable to find getent command on the host")
}
out, err := execCmd(getentCmd, args)
if err != nil {
exitCode, errC := system.GetExitCode(err)
if errC != nil {
return nil, err
}
switch exitCode {
case 1:
return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
case 2:
terms := strings.Split(args, " ")
return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
case 3:
return nil, fmt.Errorf("getent database doesn't support enumeration")
default:
return nil, err
}
}
return bytes.NewReader(out), nil
}


@@ -0,0 +1,12 @@
//go:build !linux || !libsubid || !cgo
// +build !linux !libsubid !cgo
package idtools
func readSubuid(username string) (ranges, error) {
return parseSubidFile(subuidFileName, username)
}
func readSubgid(username string) (ranges, error) {
return parseSubidFile(subgidFileName, username)
}


@@ -0,0 +1,24 @@
//go:build windows
// +build windows
package idtools
import (
"os"
)
// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around os.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
if err := os.MkdirAll(path, mode); err != nil {
return err
}
return nil
}
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
// Windows does not require/support this function, so always return true
func CanAccess(path string, pair IDPair) bool {
return true
}


@@ -0,0 +1,59 @@
package idtools
import (
"fmt"
"math"
"math/bits"
"strconv"
"strings"
)
func parseTriple(spec []string) (container, host, size uint32, err error) {
cid, err := strconv.ParseUint(spec[0], 10, 32)
if err != nil {
return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[0], err)
}
hid, err := strconv.ParseUint(spec[1], 10, 32)
if err != nil {
return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[1], err)
}
sz, err := strconv.ParseUint(spec[2], 10, 32)
if err != nil {
return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[2], err)
}
return uint32(cid), uint32(hid), uint32(sz), nil
}
// ParseIDMap parses idmap triples from string.
func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec)
for _, idMapSpec := range mapSpec {
if idMapSpec == "" {
continue
}
idSpec := strings.Split(idMapSpec, ":")
if len(idSpec)%3 != 0 {
return nil, stdErr
}
for i := range idSpec {
if i%3 != 0 {
continue
}
cid, hid, size, err := parseTriple(idSpec[i : i+3])
if err != nil {
return nil, stdErr
}
// Avoid possible integer overflow on 32bit builds
if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) {
return nil, stdErr
}
mapping := IDMap{
ContainerID: int(cid),
HostID: int(hid),
Size: int(size),
}
idmap = append(idmap, mapping)
}
}
return idmap, nil
}
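// Illustrative sketch (not part of the vendored source): parsing a single
// hypothetical triple in container:host:size order.
//
//	idmap, err := ParseIDMap([]string{"0:100000:65536"}, "uidmap")
//	// idmap == []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}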


@@ -0,0 +1,164 @@
package idtools
import (
"fmt"
"sort"
"strconv"
"strings"
"sync"
"github.com/containers/storage/pkg/regexp"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
// useradd -r -s /bin/false <username>
var (
once sync.Once
userCommand string
cmdTemplates = map[string]string{
"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
"useradd": "-r -s /bin/false %s",
"usermod": "-%s %d-%d %s",
}
idOutRegexp = regexp.Delayed(`uid=([0-9]+).*gid=([0-9]+)`)
// default length for a UID/GID subordinate range
defaultRangeLen = 65536
defaultRangeStart = 100000
userMod = "usermod"
)
// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
if err := addUser(name); err != nil {
return -1, -1, fmt.Errorf("adding user %q: %w", name, err)
}
// Query the system for the created uid and gid pair
out, err := execCmd("id", name)
if err != nil {
return -1, -1, fmt.Errorf("trying to find uid/gid for new user %q: %w", name, err)
}
matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
if len(matches) != 3 {
return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out))
}
uid, err := strconv.Atoi(matches[1])
if err != nil {
return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %w", matches[1], err)
}
gid, err := strconv.Atoi(matches[2])
if err != nil {
return -1, -1, fmt.Errorf("can't convert found gid (%s) to int: %w", matches[2], err)
}
// Now we need to create the subuid/subgid ranges for our new user/group (system users
// do not get auto-created ranges in subuid/subgid)
if err := createSubordinateRanges(name); err != nil {
return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %w", err)
}
return uid, gid, nil
}
func addUser(userName string) error {
once.Do(func() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
userCommand = "adduser"
} else if _, err := resolveBinary("useradd"); err == nil {
userCommand = "useradd"
}
})
if userCommand == "" {
return fmt.Errorf("cannot add user; no useradd/adduser binary found")
}
args := fmt.Sprintf(cmdTemplates[userCommand], userName)
out, err := execCmd(userCommand, args)
if err != nil {
return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out))
}
return nil
}
func createSubordinateRanges(name string) error {
// first, we should verify that ranges weren't automatically created
// by the distro tooling
ranges, err := readSubuid(name)
if err != nil {
return fmt.Errorf("while looking for subuid ranges for user %q: %w", name, err)
}
if len(ranges) == 0 {
// no UID ranges; let's create one
startID, err := findNextUIDRange()
if err != nil {
return fmt.Errorf("can't find available subuid range: %w", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
if err != nil {
return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err)
}
}
ranges, err = readSubgid(name)
if err != nil {
return fmt.Errorf("while looking for subgid ranges for user %q: %w", name, err)
}
if len(ranges) == 0 {
// no GID ranges; let's create one
startID, err := findNextGIDRange()
if err != nil {
return fmt.Errorf("can't find available subgid range: %w", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
if err != nil {
return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err)
}
}
return nil
}
func findNextUIDRange() (int, error) {
ranges, err := readSubuid("ALL")
if err != nil {
return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextGIDRange() (int, error) {
ranges, err := readSubgid("ALL")
if err != nil {
return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextRangeStart(rangeList ranges) (int, error) {
startID := defaultRangeStart
for _, arange := range rangeList {
if wouldOverlap(arange, startID) {
startID = arange.Start + arange.Length
}
}
return startID, nil
}
func wouldOverlap(arange subIDRange, ID int) bool {
low := ID
high := ID + defaultRangeLen
if (low >= arange.Start && low <= arange.Start+arange.Length) ||
(high <= arange.Start+arange.Length && high >= arange.Start) {
return true
}
return false
}
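// Illustrative sketch (not part of the vendored source): with one existing
// hypothetical range starting at the default start ID, the next candidate
// range begins immediately after it.
//
//	existing := ranges{subIDRange{Start: 100000, Length: 65536}}
//	start, _ := findNextRangeStart(existing) // 165536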


@@ -0,0 +1,13 @@
//go:build !linux
// +build !linux
package idtools
import "fmt"
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
return -1, -1, fmt.Errorf("no support for adding users or groups on this OS")
}


@@ -0,0 +1,33 @@
//go:build !windows
// +build !windows
package idtools
import (
"fmt"
"os/exec"
"path/filepath"
"strings"
)
func resolveBinary(binname string) (string, error) {
binaryPath, err := exec.LookPath(binname)
if err != nil {
return "", err
}
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
if err != nil {
return "", err
}
// only return no error if the final resolved binary basename
// matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
func execCmd(cmd, args string) ([]byte, error) {
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
return execCmd.CombinedOutput()
}


@@ -0,0 +1,82 @@
package lockfile
import (
"bytes"
cryptorand "crypto/rand"
"encoding/binary"
"os"
"sync/atomic"
"time"
)
// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
//
// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
type LastWrite struct {
// Never modify fields of a LastWrite object; it has value semantics.
state []byte // Contents of the lock file.
}
var lastWriterIDCounter uint64 // Private state for newLastWriterID
const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
// newLastWrite returns a new "last write" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
func newLastWrite() LastWrite {
// The ID is (PID, time, per-process counter, random)
// PID + time represents both a unique process across reboots,
// and a specific time within the process; the per-process counter
// is an extra safeguard for in-process concurrency.
// The random part disambiguates across process namespaces
// (where PID values might collide), serves as a general-purpose
// extra safety, _and_ is used to pad the output to lastWriterIDSize,
// because other versions of this code exist and they don't work
// efficiently if the size of the value changes.
pid := os.Getpid()
tm := time.Now().UnixNano()
counter := atomic.AddUint64(&lastWriterIDCounter, 1)
res := make([]byte, lastWriterIDSize)
binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
binary.LittleEndian.PutUint64(res[8:16], counter)
binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
panic(err) // This shouldn't happen
}
return LastWrite{
state: res,
}
}
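// Resulting 64-byte layout of the state written above (a sketch, matching the
// offsets used in the code):
//
//	state[0:8]   little-endian UnixNano timestamp
//	state[8:16]  little-endian per-process counter
//	state[16:20] little-endian PID
//	state[20:64] crypto/rand padding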
// serialize returns bytes to write to the lock file to represent the specified write.
func (lw LastWrite) serialize() []byte {
if lw.state == nil {
panic("LastWrite.serialize on an uninitialized object")
}
return lw.state
}
// equals returns true if lw matches other
func (lw LastWrite) equals(other LastWrite) bool {
if lw.state == nil {
panic("LastWrite.equals on an uninitialized object")
}
if other.state == nil {
panic("LastWrite.equals with an uninitialized counterparty")
}
return bytes.Equal(lw.state, other.state)
}
// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
func newLastWriteFromData(serialized []byte) LastWrite {
if serialized == nil {
panic("newLastWriteFromData with nil data")
}
return LastWrite{
state: serialized,
}
}


@@ -0,0 +1,461 @@
package lockfile
import (
"fmt"
"os"
"path/filepath"
"sync"
"time"
)
// A Locker represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// Deprecated: Refer directly to *LockFile, the provided implementation, instead.
type Locker interface {
// Acquire a writer lock.
// The default unix implementation panics if:
// - opening the lockfile failed
// - tried to lock a read-only lock-file
Lock()
// Unlock the lock.
// The default unix implementation panics if:
// - unlocking an unlocked lock
// - if the lock counter is corrupted
Unlock()
// Acquire a reader lock.
RLock()
// Touch records, for others sharing the lock, that the caller was the
// last writer. It should only be called with the lock held.
//
// Deprecated: Use *LockFile.RecordWrite.
Touch() error
// Modified() checks if the most recent writer was a party other than the
// last recorded writer. It should only be called with the lock held.
//
// Deprecated: Use *LockFile.ModifiedSince.
Modified() (bool, error)
// TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time.
TouchedSince(when time.Time) bool
// IsReadWrite() checks if the lock file is read-write
IsReadWrite() bool
// AssertLocked() can be used by callers that _know_ that they hold the lock (for reading or writing), for sanity checking.
// It might do nothing at all, or it may panic if the caller is not the owner of this lock.
AssertLocked()
// AssertLockedForWriting() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking.
// It might do nothing at all, or it may panic if the caller is not the owner of this lock for writing.
AssertLockedForWriting()
}
type lockType byte
const (
readLock lockType = iota
writeLock
)
// LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
// The following fields are only set when constructing *LockFile, and must never be modified afterwards.
// They are safe to access without any other locking.
file string
ro bool
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
lw LastWrite // A global value valid as of the last .Touch() or .Modified()
lockType lockType
locked bool
// The following fields are only modified on transitions between counter == 0 / counter != 0.
// Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
// In other cases, they need to be protected using stateMutex.
fd fileHandle
}
var (
lockFiles map[string]*LockFile
lockFilesLock sync.Mutex
)
// GetLockFile opens a read-write lock file, creating it if necessary. The
// *LockFile object may already be locked if the path has already been requested
// by the current process.
func GetLockFile(path string) (*LockFile, error) {
return getLockfile(path, false)
}
// GetLockfile opens a read-write lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
//
// Deprecated: Use GetLockFile
func GetLockfile(path string) (Locker, error) {
return GetLockFile(path)
}
// GetROLockFile opens a read-only lock file, creating it if necessary. The
// *LockFile object may already be locked if the path has already been requested
// by the current process.
func GetROLockFile(path string) (*LockFile, error) {
return getLockfile(path, true)
}
// GetROLockfile opens a read-only lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
//
// Deprecated: Use GetROLockFile
func GetROLockfile(path string) (Locker, error) {
return GetROLockFile(path)
}
// Lock locks the lockfile as a writer. Panics if the lock is a read-only one.
func (l *LockFile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
l.lock(writeLock)
}
}
// RLock locks the lockfile as a reader.
func (l *LockFile) RLock() {
l.lock(readLock)
}
// TryLock attempts to lock the lockfile as a writer. Panics if the lock is a read-only one.
func (l *LockFile) TryLock() error {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
return l.tryLock(writeLock)
}
}
// TryRLock attempts to lock the lockfile as a reader.
func (l *LockFile) TryRLock() error {
return l.tryLock(readLock)
}
// Unlock unlocks the lockfile.
func (l *LockFile) Unlock() {
l.stateMutex.Lock()
if !l.locked {
// Panic when unlocking an unlocked lock. That's a violation
// of the lock semantics and will reveal such.
panic("calling Unlock on unlocked lock")
}
l.counter--
if l.counter < 0 {
// Panic when the counter is negative. There is no way we can
// recover from a corrupted lock and we need to protect the
// storage from corruption.
panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
}
if l.counter == 0 {
// We should only release the lock when the counter is 0 to
// avoid releasing read-locks too early; a given process may
// acquire a read lock multiple times.
l.locked = false
// Close the file descriptor on the last unlock, releasing the
// file lock.
unlockAndCloseHandle(l.fd)
}
if l.lockType == readLock {
l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
}
l.stateMutex.Unlock()
}
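// Illustrative usage sketch (not part of the vendored source; the path is
// hypothetical): serialize writers across processes and record the write.
//
//	lock, err := GetLockFile("/run/example/driver.lock")
//	if err != nil {
//		return err
//	}
//	lock.Lock()
//	defer lock.Unlock()
//	if _, err := lock.RecordWrite(); err != nil {
//		return err
//	}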
func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
// we cant tell the difference.
//
// Hence, this “AssertLocked” method, which exists only for sanity checks.
// Dont even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
// with no possible writers.
// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
if !l.locked {
panic("internal error: lock is not held by the expected owner")
}
}
func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
l.AssertLocked()
// Like AssertLocked, dont even bother with l.stateMutex.
if l.lockType == readLock {
panic("internal error: lock is not held for writing")
}
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
// if err != nil { /* fail */ }
// state.lastWrite = lw2
// if modified {
// if err := reload(); err != nil { /* fail */ }
// state.lastWrite = lw2
// }
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
l.AssertLocked()
currentLW, err := l.GetLastWrite()
if err != nil {
return LastWrite{}, false, err
}
modified := !previous.equals(currentLW)
return currentLW, modified, nil
}
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.
// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
// Callers cannot, in general, rely on this, because that might have happened for some other
// owner of the same *LockFile who created it previously.
//
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
defer l.stateMutex.Unlock()
oldLW := l.lw
// Note that this is called with stateMutex held; thats fine because ModifiedSince doesnt need to lock it.
currentLW, modified, err := l.ModifiedSince(oldLW)
if err != nil {
return true, err
}
l.lw = currentLW
return modified, nil
}
// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
//
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
lw, err := l.RecordWrite()
if err != nil {
return err
}
l.stateMutex.Lock()
if !l.locked || (l.lockType == readLock) {
panic("attempted to update last-writer in lockfile without the write lock")
}
defer l.stateMutex.Unlock()
l.lw = lw
return nil
}
// IsReadWrite indicates if the lock file is a read-write lock.
func (l *LockFile) IsReadWrite() bool {
return !l.ro
}
// getLockfile returns a *LockFile object, possibly (depending on the platform)
// working inter-process, and associated with the specified path.
//
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func getLockfile(path string, ro bool) (*LockFile, error) {
lockFilesLock.Lock()
defer lockFilesLock.Unlock()
if lockFiles == nil {
lockFiles = make(map[string]*LockFile)
}
cleanPath, err := filepath.Abs(path)
if err != nil {
return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err)
}
if lockFile, ok := lockFiles[cleanPath]; ok {
if ro && lockFile.IsReadWrite() {
return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath)
}
if !ro && !lockFile.IsReadWrite() {
return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath)
}
return lockFile, nil
}
lockFile, err := createLockFileForPath(cleanPath, ro) // platform-dependent LockFile
if err != nil {
return nil, err
}
lockFiles[cleanPath] = lockFile
return lockFile, nil
}
// createLockFileForPath returns new *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
return nil, err
}
unlockAndCloseHandle(fd)
lType := writeLock
if ro {
lType = readLock
}
return &LockFile{
file: path,
ro: ro,
rwMutex: &sync.RWMutex{},
stateMutex: &sync.Mutex{},
lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
lockType: lType,
locked: false,
}, nil
}
// openLock opens the file at path and returns the corresponding file
// descriptor. The path is opened either read-only or read-write,
// depending on the value of ro argument.
//
// openLock will create the file and its parent directories,
// if necessary.
func openLock(path string, ro bool) (fd fileHandle, err error) {
flags := os.O_CREATE
if ro {
flags |= os.O_RDONLY
} else {
flags |= os.O_RDWR
}
fd, err = openHandle(path, flags)
if err == nil {
return fd, nil
}
// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return fd, fmt.Errorf("creating lock file directory: %w", err)
}
return openLock(path, ro)
}
return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
// lock locks the lockfile via syscall based on the specified type and
// command.
func (l *LockFile) lock(lType lockType) {
if lType == readLock {
l.rwMutex.RLock()
} else {
l.rwMutex.Lock()
}
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
if l.counter == 0 {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
panic(err)
}
l.fd = fd
// Optimization: only use the (expensive) syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
lockHandle(l.fd, lType, false)
}
l.lockType = lType
l.locked = true
l.counter++
}
// tryLock attempts to lock the lockfile via syscall without blocking, based
// on the specified type.
func (l *LockFile) tryLock(lType lockType) error {
var success bool
if lType == readLock {
success = l.rwMutex.TryRLock()
} else {
success = l.rwMutex.TryLock()
}
if !success {
return fmt.Errorf("resource temporarily unavailable")
}
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
if l.counter == 0 {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
l.rwMutex.Unlock()
return err
}
l.fd = fd
// Optimization: only use the (expensive) syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
if err = lockHandle(l.fd, lType, true); err != nil {
closeHandle(fd)
l.rwMutex.Unlock()
return err
}
}
l.lockType = lType
l.locked = true
l.counter++
return nil
}


@@ -0,0 +1,107 @@
//go:build linux || solaris || darwin || freebsd
// +build linux solaris darwin freebsd
package lockfile
import (
"time"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
type fileHandle uintptr
// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
// of the data protected by the lock.
// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) GetLastWrite() (LastWrite, error) {
l.AssertLocked()
contents := make([]byte, lastWriterIDSize)
n, err := unix.Pread(int(l.fd), contents, 0)
if err != nil {
return LastWrite{}, err
}
// It is important to handle the partial read case, because
// the initial size of the lock file is zero, which is a valid
// state (no writes yet)
contents = contents[:n]
return newLastWriteFromData(contents), nil
}
// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should keep using the previously-recorded LastWrite value,
// and possibly detecting its own modification as an external one:
//
// lw, err := state.lock.RecordWrite()
// if err != nil { /* fail */ }
// state.lastWrite = lw
//
// The caller must hold the lock for writing.
func (l *LockFile) RecordWrite() (LastWrite, error) {
l.AssertLockedForWriting()
lw := newLastWrite()
lockContents := lw.serialize()
n, err := unix.Pwrite(int(l.fd), lockContents, 0)
if err != nil {
return LastWrite{}, err
}
if n != len(lockContents) {
return LastWrite{}, unix.ENOSPC
}
return lw, nil
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *LockFile) TouchedSince(when time.Time) bool {
st, err := system.Fstat(int(l.fd))
if err != nil {
return true
}
mtim := st.Mtim()
touched := time.Unix(mtim.Unix())
return when.Before(touched)
}
func openHandle(path string, mode int) (fileHandle, error) {
mode |= unix.O_CLOEXEC
fd, err := unix.Open(path, mode, 0o644)
return fileHandle(fd), err
}
func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error {
fType := unix.F_RDLCK
if lType != readLock {
fType = unix.F_WRLCK
}
lk := unix.Flock_t{
Type: int16(fType),
Whence: int16(unix.SEEK_SET),
Start: 0,
Len: 0,
}
cmd := unix.F_SETLKW
if nonblocking {
cmd = unix.F_SETLK
}
for {
err := unix.FcntlFlock(uintptr(fd), cmd, &lk)
if err == nil || nonblocking {
return err
}
time.Sleep(10 * time.Millisecond)
}
}
func unlockAndCloseHandle(fd fileHandle) {
unix.Close(int(fd))
}
func closeHandle(fd fileHandle) {
unix.Close(int(fd))
}


@@ -0,0 +1,110 @@
//go:build windows
// +build windows
package lockfile
import (
"os"
"time"
"golang.org/x/sys/windows"
)
const (
reserved = 0
allBytes = ^uint32(0)
)
type fileHandle windows.Handle
// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
// of the data protected by the lock.
// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
//
// The caller must hold the lock (for reading or writing) before this function is called.
func (l *LockFile) GetLastWrite() (LastWrite, error) {
l.AssertLocked()
contents := make([]byte, lastWriterIDSize)
ol := new(windows.Overlapped)
var n uint32
err := windows.ReadFile(windows.Handle(l.fd), contents, &n, ol)
if err != nil && err != windows.ERROR_HANDLE_EOF {
return LastWrite{}, err
}
// It is important to handle the partial read case, because
// the initial size of the lock file is zero, which is a valid
// state (no writes yet)
contents = contents[:n]
return newLastWriteFromData(contents), nil
}
// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should keep using the previously-recorded LastWrite value,
// and possibly detecting its own modification as an external one:
//
// lw, err := state.lock.RecordWrite()
// if err != nil { /* fail */ }
// state.lastWrite = lw
//
// The caller must hold the lock for writing.
func (l *LockFile) RecordWrite() (LastWrite, error) {
l.AssertLockedForWriting()
lw := newLastWrite()
lockContents := lw.serialize()
ol := new(windows.Overlapped)
var n uint32
err := windows.WriteFile(windows.Handle(l.fd), lockContents, &n, ol)
if err != nil {
return LastWrite{}, err
}
if int(n) != len(lockContents) {
return LastWrite{}, windows.ERROR_DISK_FULL
}
return lw, nil
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *LockFile) TouchedSince(when time.Time) bool {
stat, err := os.Stat(l.file)
if err != nil {
return true
}
return when.Before(stat.ModTime())
}
func openHandle(path string, mode int) (fileHandle, error) {
mode |= windows.O_CLOEXEC
fd, err := windows.Open(path, mode, windows.S_IWRITE)
return fileHandle(fd), err
}
func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error {
flags := 0
if lType != readLock {
flags = windows.LOCKFILE_EXCLUSIVE_LOCK
}
if nonblocking {
flags |= windows.LOCKFILE_FAIL_IMMEDIATELY
}
ol := new(windows.Overlapped)
if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil {
if nonblocking {
return err
}
panic(err)
}
return nil
}
func unlockAndCloseHandle(fd fileHandle) {
ol := new(windows.Overlapped)
windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol)
closeHandle(fd)
}
func closeHandle(fd fileHandle) {
windows.Close(windows.Handle(fd))
}

vendor/github.com/containers/storage/pkg/mount/flags.go

@@ -0,0 +1,149 @@
package mount
import (
"fmt"
"strings"
)
var flags = map[string]struct {
clear bool
flag int
}{
"defaults": {false, 0},
"ro": {false, RDONLY},
"rw": {true, RDONLY},
"suid": {true, NOSUID},
"nosuid": {false, NOSUID},
"dev": {true, NODEV},
"nodev": {false, NODEV},
"exec": {true, NOEXEC},
"noexec": {false, NOEXEC},
"sync": {false, SYNCHRONOUS},
"async": {true, SYNCHRONOUS},
"dirsync": {false, DIRSYNC},
"remount": {false, REMOUNT},
"mand": {false, MANDLOCK},
"nomand": {true, MANDLOCK},
"atime": {true, NOATIME},
"noatime": {false, NOATIME},
"diratime": {true, NODIRATIME},
"nodiratime": {false, NODIRATIME},
"bind": {false, BIND},
"rbind": {false, RBIND},
"unbindable": {false, UNBINDABLE},
"runbindable": {false, RUNBINDABLE},
"private": {false, PRIVATE},
"rprivate": {false, RPRIVATE},
"shared": {false, SHARED},
"rshared": {false, RSHARED},
"slave": {false, SLAVE},
"rslave": {false, RSLAVE},
"relatime": {false, RELATIME},
"norelatime": {true, RELATIME},
"strictatime": {false, STRICTATIME},
"nostrictatime": {true, STRICTATIME},
}
var validFlags = map[string]bool{
"": true,
"size": true,
"mode": true,
"uid": true,
"gid": true,
"nr_inodes": true,
"nr_blocks": true,
"mpol": true,
}
var propagationFlags = map[string]bool{
"bind": true,
"rbind": true,
"unbindable": true,
"runbindable": true,
"private": true,
"rprivate": true,
"shared": true,
"rshared": true,
"slave": true,
"rslave": true,
}
// MergeTmpfsOptions merges mount options to make sure there are no duplicates.
func MergeTmpfsOptions(options []string) ([]string, error) {
// We use collisions maps to remove duplicates.
// For flag, the key is the flag value (the key for propagation flag is -1)
// For data=value, the key is the data
flagCollisions := map[int]bool{}
dataCollisions := map[string]bool{}
var newOptions []string
// We process in reverse order
for i := len(options) - 1; i >= 0; i-- {
option := options[i]
if option == "defaults" {
continue
}
if f, ok := flags[option]; ok && f.flag != 0 {
// There is only one propagation mode
key := f.flag
if propagationFlags[option] {
key = -1
}
// Check to see if there is collision for flag
if !flagCollisions[key] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
flagCollisions[key] = true
}
continue
}
opt := strings.SplitN(option, "=", 2)
if len(opt) != 2 || !validFlags[opt[0]] {
return nil, fmt.Errorf("invalid tmpfs option %q", opt)
}
if !dataCollisions[opt[0]] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
dataCollisions[opt[0]] = true
}
}
return newOptions, nil
}
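// Illustrative sketch (not part of the vendored source): because options are
// processed in reverse, later duplicates win (flag values assume Linux).
//
//	merged, _ := MergeTmpfsOptions([]string{"noexec", "exec", "size=64m", "size=128m"})
//	// merged == []string{"exec", "size=128m"}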
// ParseOptions parses fstab type mount options into mount() flags
// and device specific data
func ParseOptions(options string) (int, string) {
var (
flag int
data []string
)
for _, o := range strings.Split(options, ",") {
// If the option does not exist in the flags table or the flag
// is not supported on the platform,
// then it is a data value for a specific fs type
if f, exists := flags[o]; exists && f.flag != 0 {
if f.clear {
flag &= ^f.flag
} else {
flag |= f.flag
}
} else {
data = append(data, o)
}
}
return flag, strings.Join(data, ",")
}
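// Illustrative sketch (not part of the vendored source): splitting an
// fstab-style option string into flag bits and fs-specific data (flag values
// assume Linux).
//
//	flag, data := ParseOptions("ro,noexec,size=64m")
//	// flag == RDONLY|NOEXEC, data == "size=64m"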
// ParseTmpfsOptions parses fstab-type mount options into flags and data
func ParseTmpfsOptions(options string) (int, string, error) {
flags, data := ParseOptions(options)
for _, o := range strings.Split(data, ",") {
opt := strings.SplitN(o, "=", 2)
if !validFlags[opt[0]] {
return 0, "", fmt.Errorf("invalid tmpfs option %q", opt)
}
}
return flags, data, nil
}


@@ -0,0 +1,48 @@
package mount
import (
"golang.org/x/sys/unix"
)
const (
// RDONLY will mount the file system read-only.
RDONLY = unix.MNT_RDONLY
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
// take effect.
NOSUID = unix.MNT_NOSUID
// NOEXEC will not allow execution of any binaries on the mounted file system.
NOEXEC = unix.MNT_NOEXEC
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
SYNCHRONOUS = unix.MNT_SYNCHRONOUS
// REMOUNT will attempt to remount an already-mounted file system. This is
// commonly used to change the mount flags for a file system, especially to
// make a readonly file system writeable. It does not change device or mount
// point.
REMOUNT = unix.MNT_UPDATE
// NOATIME will not update the file access time when reading from a file.
NOATIME = unix.MNT_NOATIME
mntDetach = unix.MNT_FORCE
NODIRATIME = 0
NODEV = 0
DIRSYNC = 0
MANDLOCK = 0
BIND = 0
RBIND = 0
UNBINDABLE = 0
RUNBINDABLE = 0
PRIVATE = 0
RPRIVATE = 0
SLAVE = 0
RSLAVE = 0
SHARED = 0
RSHARED = 0
RELATIME = 0
STRICTATIME = 0
)


@@ -0,0 +1,87 @@
package mount
import (
"golang.org/x/sys/unix"
)
const (
// RDONLY will mount the file system read-only.
RDONLY = unix.MS_RDONLY
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
// take effect.
NOSUID = unix.MS_NOSUID
// NODEV will not interpret character or block special devices on the file
// system.
NODEV = unix.MS_NODEV
// NOEXEC will not allow execution of any binaries on the mounted file system.
NOEXEC = unix.MS_NOEXEC
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
SYNCHRONOUS = unix.MS_SYNCHRONOUS
// DIRSYNC will force all directory updates within the file system to be done
// synchronously. This affects the following system calls: create, link,
// unlink, symlink, mkdir, rmdir, mknod and rename.
DIRSYNC = unix.MS_DIRSYNC
// REMOUNT will attempt to remount an already-mounted file system. This is
// commonly used to change the mount flags for a file system, especially to
// make a readonly file system writeable. It does not change device or mount
// point.
REMOUNT = unix.MS_REMOUNT
// MANDLOCK will force mandatory locks on a filesystem.
MANDLOCK = unix.MS_MANDLOCK
// NOATIME will not update the file access time when reading from a file.
NOATIME = unix.MS_NOATIME
// NODIRATIME will not update the directory access time.
NODIRATIME = unix.MS_NODIRATIME
// BIND remounts a subtree somewhere else.
BIND = unix.MS_BIND
// RBIND remounts a subtree and all possible submounts somewhere else.
RBIND = unix.MS_BIND | unix.MS_REC
// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
UNBINDABLE = unix.MS_UNBINDABLE
// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC
// PRIVATE creates a mount which carries no propagation abilities.
PRIVATE = unix.MS_PRIVATE
// RPRIVATE marks the entire mount tree as PRIVATE.
RPRIVATE = unix.MS_PRIVATE | unix.MS_REC
// SLAVE creates a mount which receives propagation from its master, but not
// vice versa.
SLAVE = unix.MS_SLAVE
// RSLAVE marks the entire mount tree as SLAVE.
RSLAVE = unix.MS_SLAVE | unix.MS_REC
// SHARED creates a mount which provides the ability to create mirrors of
// that mount such that mounts and unmounts within any of the mirrors
// propagate to the other mirrors.
SHARED = unix.MS_SHARED
// RSHARED marks the entire mount tree as SHARED.
RSHARED = unix.MS_SHARED | unix.MS_REC
// RELATIME updates inode access times relative to modify or change time.
RELATIME = unix.MS_RELATIME
// STRICTATIME allows to explicitly request full atime updates. This makes
// it possible for the kernel to default to relatime or noatime but still
// allow userspace to override it.
STRICTATIME = unix.MS_STRICTATIME
mntDetach = unix.MNT_DETACH
)


@@ -0,0 +1,32 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd
package mount
// These flags are unsupported.
const (
BIND = 0
DIRSYNC = 0
MANDLOCK = 0
NOATIME = 0
NODEV = 0
NODIRATIME = 0
NOEXEC = 0
NOSUID = 0
UNBINDABLE = 0
RUNBINDABLE = 0
PRIVATE = 0
RPRIVATE = 0
SHARED = 0
RSHARED = 0
SLAVE = 0
RSLAVE = 0
RBIND = 0
RELATIME = 0
RELATIVE = 0
REMOUNT = 0
STRICTATIME = 0
SYNCHRONOUS = 0
RDONLY = 0
mntDetach = 0
)

vendor/github.com/containers/storage/pkg/mount/mount.go

@@ -0,0 +1,110 @@
package mount
import (
"sort"
"strconv"
"strings"
)
// mountError holds an error from a mount or unmount operation
type mountError struct {
op string
source, target string
flags uintptr
data string
err error
}
// Error returns a string representation of mountError
func (e *mountError) Error() string {
out := e.op + " "
if e.source != "" {
out += e.source + ":" + e.target
} else {
out += e.target
}
if e.flags != uintptr(0) {
out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16)
}
if e.data != "" {
out += ", data: " + e.data
}
out += ": " + e.err.Error()
return out
}
// Cause returns the underlying cause of the error
func (e *mountError) Cause() error {
return e.err
}
// Unwrap returns the underlying cause of the error
func (e *mountError) Unwrap() error {
return e.err
}
// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
flag, data := ParseOptions(options)
if flag&REMOUNT != REMOUNT {
if mounted, err := Mounted(target); err != nil || mounted {
return err
}
}
return mount(device, target, mType, uintptr(flag), data)
}
// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
flag, data := ParseOptions(options)
return mount(device, target, mType, uintptr(flag), data)
}
// Unmount lazily unmounts a filesystem on supported platforms, otherwise
// does a normal unmount.
func Unmount(target string) error {
return unmount(target, mntDetach)
}
// RecursiveUnmount unmounts the target and all mounts underneath, starting with
// the deepest mount first.
func RecursiveUnmount(target string) error {
mounts, err := GetMounts()
if err != nil {
return err
}
// Make the deepest mount be first
sort.Slice(mounts, func(i, j int) bool {
return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
})
for i, m := range mounts {
if !strings.HasPrefix(m.Mountpoint, target) {
continue
}
// Ignore errors for submounts and continue trying to unmount the others;
// the final unmount should fail if there are any submounts remaining.
if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
return err
}
}
return nil
}
// ForceUnmount lazily unmounts a filesystem on supported platforms,
// otherwise does a normal unmount.
//
// Deprecated: please use Unmount instead, it is identical.
func ForceUnmount(target string) error {
return unmount(target, mntDetach)
}


@@ -0,0 +1,68 @@
//go:build freebsd && cgo
// +build freebsd,cgo
package mount
/*
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/_iovec.h>
#include <sys/mount.h>
#include <sys/param.h>
*/
import "C"
import (
"fmt"
"strings"
"unsafe"
)
func allocateIOVecs(options []string) []C.struct_iovec {
out := make([]C.struct_iovec, len(options))
for i, option := range options {
out[i].iov_base = unsafe.Pointer(C.CString(option))
out[i].iov_len = C.size_t(len(option) + 1)
}
return out
}
func mount(device, target, mType string, flag uintptr, data string) error {
isNullFS := false
options := []string{"fspath", target}
if data != "" {
xs := strings.Split(data, ",")
for _, x := range xs {
if x == "bind" {
isNullFS = true
continue
}
opt := strings.SplitN(x, "=", 2)
options = append(options, opt[0])
if len(opt) == 2 {
options = append(options, opt[1])
} else {
options = append(options, "")
}
}
}
if isNullFS {
options = append(options, "fstype", "nullfs", "target", device)
} else {
options = append(options, "fstype", mType, "from", device)
}
rawOptions := allocateIOVecs(options)
for _, rawOption := range rawOptions {
defer C.free(rawOption.iov_base)
}
if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
reason := C.GoString(C.strerror(*C.__error()))
return fmt.Errorf("failed to call nmount: %s", reason)
}
return nil
}


@@ -0,0 +1,74 @@
package mount
import (
"golang.org/x/sys/unix"
)
const (
// ptypes is the set of propagation types.
ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
// pflags is the full set of valid flags for a change propagation call.
pflags = ptypes | unix.MS_REC | unix.MS_SILENT
// broflags is the combination of bind and read only
broflags = unix.MS_BIND | unix.MS_RDONLY
none = "none"
)
// isremount returns true if either the device name or the flags identify a remount request, and false otherwise.
func isremount(device string, flags uintptr) bool {
switch {
// We treat device "" and "none" as a remount request to provide compatibility with
// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
case flags&unix.MS_REMOUNT != 0, device == "", device == none:
return true
default:
return false
}
}
func mount(device, target, mType string, flags uintptr, data string) error {
oflags := flags &^ ptypes
if !isremount(device, flags) || data != "" {
// Initial call applying all non-propagation flags for mount
// or remount with changed data
if err := unix.Mount(device, target, mType, oflags, data); err != nil {
return &mountError{
op: "mount",
source: device,
target: target,
flags: oflags,
data: data,
err: err,
}
}
}
if flags&ptypes != 0 {
// Change the propagation type.
if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
return &mountError{
op: "remount",
target: target,
flags: flags & pflags,
err: err,
}
}
}
if oflags&broflags == broflags {
// Remount the bind to apply read only.
if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil {
return &mountError{
op: "remount-ro",
target: target,
flags: oflags | unix.MS_REMOUNT,
err: err,
}
}
}
return nil
}
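// For illustration (flags chosen for the example): mount(dev, target, "none",
// BIND|RDONLY|SHARED, "") issues up to three syscalls in sequence: the bind
// mount itself (all non-propagation flags), a propagation change to shared,
// and finally a read-only remount of the bind.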


@@ -0,0 +1,9 @@
//go:build !linux && !(freebsd && cgo)
// +build !linux
// +build !freebsd !cgo
package mount
func mount(device, target, mType string, flag uintptr, data string) error {
panic("Not implemented")
}


@@ -0,0 +1,13 @@
package mount
import (
"github.com/moby/sys/mountinfo"
)
type Info = mountinfo.Info
var Mounted = mountinfo.Mounted
func GetMounts() ([]*Info, error) {
return mountinfo.GetMounts(nil)
}


@@ -0,0 +1,5 @@
package mount
import "github.com/moby/sys/mountinfo"
var PidMountInfo = mountinfo.PidMountInfo


@@ -0,0 +1,64 @@
package mount
// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeShared(mountPoint string) error {
return ensureMountedAs(mountPoint, SHARED)
}
// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRShared(mountPoint string) error {
return ensureMountedAs(mountPoint, RSHARED)
}
// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
// See the supported options in flags.go for further reference.
func MakePrivate(mountPoint string) error {
return ensureMountedAs(mountPoint, PRIVATE)
}
// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeRPrivate(mountPoint string) error {
return ensureMountedAs(mountPoint, RPRIVATE)
}
// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeSlave(mountPoint string) error {
return ensureMountedAs(mountPoint, SLAVE)
}
// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRSlave(mountPoint string) error {
return ensureMountedAs(mountPoint, RSLAVE)
}
// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeUnbindable(mountPoint string) error {
return ensureMountedAs(mountPoint, UNBINDABLE)
}
// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
// option enabled. See the supported options in flags.go for further reference.
func MakeRUnbindable(mountPoint string) error {
return ensureMountedAs(mountPoint, RUNBINDABLE)
}
func ensureMountedAs(mnt string, flags int) error {
mounted, err := Mounted(mnt)
if err != nil {
return err
}
if !mounted {
if err := mount(mnt, mnt, "none", uintptr(BIND), ""); err != nil {
return err
}
}
return mount("", mnt, "none", uintptr(flags), "")
}


@@ -0,0 +1,35 @@
//go:build !windows
// +build !windows
package mount
import (
"time"
"golang.org/x/sys/unix"
)
func unmount(target string, flags int) error {
var err error
for i := 0; i < 50; i++ {
err = unix.Unmount(target, flags)
switch err {
case unix.EBUSY:
time.Sleep(50 * time.Millisecond)
continue
case unix.EINVAL, nil:
// Ignore "not mounted" error here. Note the same error
// can be returned if flags are invalid, so this code
// assumes that the flags value is always correct.
return nil
}
break
}
return &mountError{
op: "umount",
target: target,
flags: uintptr(flags),
err: err,
}
}


@@ -0,0 +1,8 @@
//go:build windows
// +build windows
package mount
func unmount(target string, flag int) error {
panic("Not implemented")
}


@@ -0,0 +1,5 @@
# reexec
The `reexec` package facilitates the busybox style reexec of the docker binary that we require because
of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of
the exec of the binary will be used to find and execute custom init paths.
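A minimal sketch of the intended wiring, assuming a handler registered as `my-init` (the name is illustrative):

```go
package main

import "github.com/containers/storage/pkg/reexec"

func init() {
	// Runs in the re-exec'd child when argv[0] matches the registered name.
	reexec.Register("my-init", func() {
		// custom init work for the child process
	})
}

func main() {
	// Must run first: if this process was re-exec'd under a registered
	// name, execute the handler and return instead of continuing startup.
	if reexec.Init() {
		return
	}
	// Normal program flow; spawn the child via reexec.Command("my-init").
}
```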


@@ -0,0 +1,38 @@
//go:build freebsd
// +build freebsd
package reexec
import (
"context"
"os"
"os/exec"
"golang.org/x/sys/unix"
)
// Self returns the path to the current process's binary.
// Uses sysctl.
func Self() string {
path, err := unix.SysctlArgs("kern.proc.pathname", -1)
if err == nil {
return path
}
return os.Args[0]
}
// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
cmd := exec.Command(Self())
cmd.Args = args
return cmd
}
// CommandContext returns *exec.Cmd which has Path as current binary.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
}


@@ -0,0 +1,35 @@
//go:build linux
// +build linux
package reexec
import (
"context"
"os/exec"
)
// Self returns the path to the current process's binary.
// Returns "/proc/self/exe".
func Self() string {
return "/proc/self/exe"
}
// Command returns *exec.Cmd which has Path as current binary.
// This will use the in-memory version (/proc/self/exe) of the current binary;
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
panicIfNotInitialized()
cmd := exec.Command(Self())
cmd.Args = args
return cmd
}
// CommandContext returns *exec.Cmd which has Path as current binary.
// This will use the in-memory version (/proc/self/exe) of the current binary;
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
panicIfNotInitialized()
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
}


@@ -0,0 +1,33 @@
//go:build solaris || darwin
// +build solaris darwin
package reexec
import (
"context"
"os/exec"
)
// Self returns the path to the current process's binary.
// Uses os.Args[0].
func Self() string {
return naiveSelf()
}
// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
panicIfNotInitialized()
cmd := exec.Command(Self())
cmd.Args = args
return cmd
}
// CommandContext returns *exec.Cmd which has Path as current binary.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
panicIfNotInitialized()
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
}


@@ -0,0 +1,21 @@
//go:build !linux && !windows && !freebsd && !solaris && !darwin
// +build !linux,!windows,!freebsd,!solaris,!darwin
package reexec
import (
"context"
"os/exec"
)
// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
func Command(args ...string) *exec.Cmd {
panicIfNotInitialized()
return nil
}
// CommandContext is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
panicIfNotInitialized()
return nil
}


@@ -0,0 +1,35 @@
//go:build windows
// +build windows
package reexec
import (
"context"
"os/exec"
)
// Self returns the path to the current process's binary.
// Uses os.Args[0].
func Self() string {
return naiveSelf()
}
// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func Command(args ...string) *exec.Cmd {
panicIfNotInitialized()
cmd := exec.Command(Self())
cmd.Args = args
return cmd
}
// CommandContext returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
panicIfNotInitialized()
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
}


@@ -0,0 +1,66 @@
package reexec
import (
"fmt"
"os"
"os/exec"
"path/filepath"
)
var (
registeredInitializers = make(map[string]func())
initWasCalled = false
)
// Register adds an initialization func under the specified name
func Register(name string, initializer func()) {
if _, exists := registeredInitializers[name]; exists {
panic(fmt.Sprintf("reexec func already registered under name %q", name))
}
registeredInitializers[name] = initializer
}
// Init is called as the first part of the exec process and returns true if an
// initialization function was called.
func Init() bool {
initializer, exists := registeredInitializers[os.Args[0]]
initWasCalled = true
if exists {
initializer()
return true
}
return false
}
func panicIfNotInitialized() {
if !initWasCalled {
// The reexec package is used to run subroutines in
// subprocesses which would otherwise have unacceptable side
// effects on the main thread. If you found this error, then
// your program uses a package which needs to do this. In
// order for that to work, main() should start with this
// boilerplate, or an equivalent:
// if reexec.Init() {
// return
// }
panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()")
}
}
func naiveSelf() string { //nolint: unused
name := os.Args[0]
if filepath.Base(name) == name {
if lp, err := exec.LookPath(name); err == nil {
return lp
}
}
// handle conversion of relative paths to absolute
if absName, err := filepath.Abs(name); err == nil {
return absName
}
// if we couldn't get absolute name, return original
// (NOTE: Go only errors on Abs() if os.Getwd fails)
return name
}


@@ -0,0 +1,234 @@
package regexp
import (
"io"
"regexp"
"sync"
)
// Regexp is a wrapper struct used for wrapping MustCompile regex expressions
// used as global variables. Using this structure helps improve the startup
// time of apps that declare global regex variables: this library compiles
// them on first use instead of at the start of the executable.
type Regexp struct {
*regexpStruct
}
type regexpStruct struct {
_ noCopy
once sync.Once
regexp *regexp.Regexp
val string
}
// Delayed returns a Regexp whose pattern is compiled on first use, or
// eagerly when built with the regexp_precompile tag.
func Delayed(val string) Regexp {
re := &regexpStruct{
val: val,
}
if precompile {
re.regexp = regexp.MustCompile(re.val)
}
return Regexp{re}
}
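// Usage sketch (pattern illustrative): declare package-level patterns with
// Delayed instead of regexp.MustCompile; compilation then happens on first
// use, or eagerly when built with the regexp_precompile tag:
//
//	var hexRE = Delayed(`^[0-9a-f]+$`)
//
//	func isHex(s string) bool { return hexRE.MatchString(s) }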
func (re *regexpStruct) compile() {
if precompile {
return
}
re.once.Do(func() {
re.regexp = regexp.MustCompile(re.val)
})
}
func (re *regexpStruct) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
re.compile()
return re.regexp.Expand(dst, template, src, match)
}
func (re *regexpStruct) ExpandString(dst []byte, template string, src string, match []int) []byte {
re.compile()
return re.regexp.ExpandString(dst, template, src, match)
}
func (re *regexpStruct) Find(b []byte) []byte {
re.compile()
return re.regexp.Find(b)
}
func (re *regexpStruct) FindAll(b []byte, n int) [][]byte {
re.compile()
return re.regexp.FindAll(b, n)
}
func (re *regexpStruct) FindAllIndex(b []byte, n int) [][]int {
re.compile()
return re.regexp.FindAllIndex(b, n)
}
func (re *regexpStruct) FindAllString(s string, n int) []string {
re.compile()
return re.regexp.FindAllString(s, n)
}
func (re *regexpStruct) FindAllStringIndex(s string, n int) [][]int {
re.compile()
return re.regexp.FindAllStringIndex(s, n)
}
func (re *regexpStruct) FindAllStringSubmatch(s string, n int) [][]string {
re.compile()
return re.regexp.FindAllStringSubmatch(s, n)
}
func (re *regexpStruct) FindAllStringSubmatchIndex(s string, n int) [][]int {
re.compile()
return re.regexp.FindAllStringSubmatchIndex(s, n)
}
func (re *regexpStruct) FindAllSubmatch(b []byte, n int) [][][]byte {
re.compile()
return re.regexp.FindAllSubmatch(b, n)
}
func (re *regexpStruct) FindAllSubmatchIndex(b []byte, n int) [][]int {
re.compile()
return re.regexp.FindAllSubmatchIndex(b, n)
}
func (re *regexpStruct) FindIndex(b []byte) (loc []int) {
re.compile()
return re.regexp.FindIndex(b)
}
func (re *regexpStruct) FindReaderIndex(r io.RuneReader) (loc []int) {
re.compile()
return re.regexp.FindReaderIndex(r)
}
func (re *regexpStruct) FindReaderSubmatchIndex(r io.RuneReader) []int {
re.compile()
return re.regexp.FindReaderSubmatchIndex(r)
}
func (re *regexpStruct) FindString(s string) string {
re.compile()
return re.regexp.FindString(s)
}
func (re *regexpStruct) FindStringIndex(s string) (loc []int) {
re.compile()
return re.regexp.FindStringIndex(s)
}
func (re *regexpStruct) FindStringSubmatch(s string) []string {
re.compile()
return re.regexp.FindStringSubmatch(s)
}
func (re *regexpStruct) FindStringSubmatchIndex(s string) []int {
re.compile()
return re.regexp.FindStringSubmatchIndex(s)
}
func (re *regexpStruct) FindSubmatch(b []byte) [][]byte {
re.compile()
return re.regexp.FindSubmatch(b)
}
func (re *regexpStruct) FindSubmatchIndex(b []byte) []int {
re.compile()
return re.regexp.FindSubmatchIndex(b)
}
func (re *regexpStruct) LiteralPrefix() (prefix string, complete bool) {
re.compile()
return re.regexp.LiteralPrefix()
}
func (re *regexpStruct) Longest() {
re.compile()
re.regexp.Longest()
}
func (re *regexpStruct) Match(b []byte) bool {
re.compile()
return re.regexp.Match(b)
}
func (re *regexpStruct) MatchReader(r io.RuneReader) bool {
re.compile()
return re.regexp.MatchReader(r)
}
func (re *regexpStruct) MatchString(s string) bool {
re.compile()
return re.regexp.MatchString(s)
}
func (re *regexpStruct) NumSubexp() int {
re.compile()
return re.regexp.NumSubexp()
}
func (re *regexpStruct) ReplaceAll(src, repl []byte) []byte {
re.compile()
return re.regexp.ReplaceAll(src, repl)
}
func (re *regexpStruct) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
re.compile()
return re.regexp.ReplaceAllFunc(src, repl)
}
func (re *regexpStruct) ReplaceAllLiteral(src, repl []byte) []byte {
re.compile()
return re.regexp.ReplaceAllLiteral(src, repl)
}
func (re *regexpStruct) ReplaceAllLiteralString(src, repl string) string {
re.compile()
return re.regexp.ReplaceAllLiteralString(src, repl)
}
func (re *regexpStruct) ReplaceAllString(src, repl string) string {
re.compile()
return re.regexp.ReplaceAllString(src, repl)
}
func (re *regexpStruct) ReplaceAllStringFunc(src string, repl func(string) string) string {
re.compile()
return re.regexp.ReplaceAllStringFunc(src, repl)
}
func (re *regexpStruct) Split(s string, n int) []string {
re.compile()
return re.regexp.Split(s, n)
}
func (re *regexpStruct) String() string {
re.compile()
return re.regexp.String()
}
func (re *regexpStruct) SubexpIndex(name string) int {
re.compile()
return re.regexp.SubexpIndex(name)
}
func (re *regexpStruct) SubexpNames() []string {
re.compile()
return re.regexp.SubexpNames()
}
// noCopy may be added to structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
//
// Note that it must not be embedded, due to the Lock and Unlock methods.
type noCopy struct{}
// Lock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Lock() {}
func (*noCopy) Unlock() {}


@@ -0,0 +1,6 @@
//go:build !regexp_precompile
// +build !regexp_precompile
package regexp
const precompile = false


@@ -0,0 +1,6 @@
//go:build regexp_precompile
// +build regexp_precompile
package regexp
const precompile = true


@@ -0,0 +1,17 @@
package system
import (
"errors"
"os"
"syscall"
)
func Chmod(name string, mode os.FileMode) error {
err := os.Chmod(name, mode)
for err != nil && errors.Is(err, syscall.EINTR) {
err = os.Chmod(name, mode)
}
return err
}


@@ -0,0 +1,35 @@
package system
import (
"os"
"time"
)
// Chtimes changes the access time and modified time of a file at the given path
func Chtimes(name string, atime time.Time, mtime time.Time) error {
unixMinTime := time.Unix(0, 0)
unixMaxTime := maxTime
// If the access or modified time is prior to the Unix epoch, or after the
// end of Unix time, os.Chtimes has undefined behavior; default to the Unix
// epoch in that case.
if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
atime = unixMinTime
}
if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
mtime = unixMinTime
}
if err := os.Chtimes(name, atime, mtime); err != nil {
return err
}
// Take platform specific action for setting create time.
if err := setCTime(name, mtime); err != nil {
return err
}
return nil
}
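// For example (path illustrative), Chtimes("/tmp/f", time.Unix(-1, 0), mtime)
// stores an atime of the Unix epoch (1970-01-01T00:00:00Z) rather than a
// negative timestamp, because out-of-range values are clamped above.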


@@ -0,0 +1,15 @@
//go:build !windows
// +build !windows
package system
import (
"time"
)
// setCTime will set the create time on a file. On Unix, the create
// time is updated as a side effect of setting the modified time, so
// no action is required.
func setCTime(path string, ctime time.Time) error {
return nil
}


@@ -0,0 +1,29 @@
//go:build windows
// +build windows
package system
import (
"time"
"golang.org/x/sys/windows"
)
// setCTime will set the create time on a file. On Windows, this requires
// calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
ctimespec := windows.NsecToTimespec(ctime.UnixNano())
pathp, e := windows.UTF16PtrFromString(path)
if e != nil {
return e
}
h, e := windows.CreateFile(pathp,
windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
if e != nil {
return e
}
defer windows.Close(h)
c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
return windows.SetFileTime(h, &c, nil, nil)
}


@@ -0,0 +1,8 @@
package system
import (
"errors"
)
// ErrNotSupportedPlatform means the platform is not supported.
var ErrNotSupportedPlatform = errors.New("platform and architecture are not supported")


@@ -0,0 +1,33 @@
package system
import (
"fmt"
"os/exec"
"syscall"
)
// GetExitCode returns the ExitStatus of the specified error if its type is
// exec.ExitError; otherwise it returns 0 and an error.
func GetExitCode(err error) (int, error) {
exitCode := 0
if exiterr, ok := err.(*exec.ExitError); ok {
if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
return procExit.ExitStatus(), nil
}
}
return exitCode, fmt.Errorf("failed to get exit code")
}
// ProcessExitCode processes the specified error and returns its exit status
// code if the error is of type exec.ExitError; otherwise it returns 127 for
// a non-nil error and 0 for a nil one.
func ProcessExitCode(err error) (exitCode int) {
if err != nil {
var exiterr error
if exitCode, exiterr = GetExitCode(err); exiterr != nil {
// TODO: Fix this so we check the error's text.
// we've failed to retrieve exit code, so we set it to 127
exitCode = 127
}
}
return
}
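// Usage sketch (command illustrative):
//
//	err := exec.Command("false").Run()
//	code := ProcessExitCode(err) // 1 for `false`; 127 if the status cannot be read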


@@ -0,0 +1,22 @@
package system
import (
"syscall"
"time"
"unsafe"
)
// maxTime is used by Chtimes.
var maxTime time.Time
func init() {
// chtimes initialization
if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
// This is a 64 bit timespec
// os.Chtimes limits time to the following
maxTime = time.Unix(0, 1<<63-1)
} else {
// This is a 32 bit timespec
maxTime = time.Unix(1<<31-1, 0)
}
}


@@ -0,0 +1,16 @@
package system
import "os"
// LCOWSupported determines if Linux Containers on Windows are supported.
// Note: This feature is in development (06/17) and enabled through an
// environment variable. At a future time, it will be enabled based
// on build number. @jhowardmsft
var lcowSupported = false
func init() {
// LCOW initialization
if os.Getenv("LCOW_SUPPORTED") != "" {
lcowSupported = true
}
}


@@ -0,0 +1,56 @@
//go:build freebsd
// +build freebsd
package system
import (
"unsafe"
"golang.org/x/sys/unix"
)
// Flag values from <sys/stat.h>
const (
/*
* Definitions of flags stored in file flags word.
*
* Super-user and owner changeable flags.
*/
UF_SETTABLE uint32 = 0x0000ffff /* mask of owner changeable flags */
UF_NODUMP uint32 = 0x00000001 /* do not dump file */
UF_IMMUTABLE uint32 = 0x00000002 /* file may not be changed */
UF_APPEND uint32 = 0x00000004 /* writes to file may only append */
UF_OPAQUE uint32 = 0x00000008 /* directory is opaque wrt. union */
UF_NOUNLINK uint32 = 0x00000010 /* file may not be removed or renamed */
UF_SYSTEM uint32 = 0x00000080 /* Windows system file bit */
UF_SPARSE uint32 = 0x00000100 /* sparse file */
UF_OFFLINE uint32 = 0x00000200 /* file is offline */
UF_REPARSE uint32 = 0x00000400 /* Windows reparse point file bit */
UF_ARCHIVE uint32 = 0x00000800 /* file needs to be archived */
UF_READONLY uint32 = 0x00001000 /* Windows readonly file bit */
/* This is the same as the MacOS X definition of UF_HIDDEN. */
UF_HIDDEN uint32 = 0x00008000 /* file is hidden */
/*
* Super-user changeable flags.
*/
SF_SETTABLE uint32 = 0xffff0000 /* mask of superuser changeable flags */
SF_ARCHIVED uint32 = 0x00010000 /* file is archived */
SF_IMMUTABLE uint32 = 0x00020000 /* file may not be changed */
SF_APPEND uint32 = 0x00040000 /* writes to file may only append */
SF_NOUNLINK uint32 = 0x00100000 /* file may not be removed or renamed */
SF_SNAPSHOT uint32 = 0x00200000 /* snapshot inode */
)
// Lchflags sets the file flags on path without following symbolic links.
func Lchflags(path string, flags uint32) error {
p, err := unix.BytePtrFromString(path)
if err != nil {
return err
}
_, _, e1 := unix.Syscall(unix.SYS_LCHFLAGS, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
return e1
}
return nil
}


@@ -0,0 +1,20 @@
package system
import (
"os"
"syscall"
)
// Lchown changes the numeric uid and gid of name without following symbolic
// links, retrying if the call is interrupted (EINTR).
func Lchown(name string, uid, gid int) error {
err := syscall.Lchown(name, uid, gid)
for err == syscall.EINTR {
err = syscall.Lchown(name, uid, gid)
}
if err != nil {
return &os.PathError{Op: "lchown", Path: name, Err: err}
}
return nil
}


@@ -0,0 +1,9 @@
//go:build !windows
// +build !windows
package system
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return false
}


@@ -0,0 +1,6 @@
package system
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return lcowSupported
}


@@ -0,0 +1,21 @@
//go:build !windows
// +build !windows
package system
import (
"os"
"syscall"
)
// Lstat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// It returns an error if the file does not exist.
func Lstat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Lstat(path, s); err != nil {
return nil, &os.PathError{Op: "Lstat", Path: path, Err: err}
}
return fromStatT(s)
}


@@ -0,0 +1,14 @@
package system
import "os"
// Lstat calls os.Lstat to get a fileinfo interface back.
// This is then copied into our own locally defined structure.
func Lstat(path string) (*StatT, error) {
fi, err := os.Lstat(path)
if err != nil {
return nil, err
}
return fromStatT(&fi)
}


@@ -0,0 +1,17 @@
package system
// MemInfo contains memory statistics of the host system.
type MemInfo struct {
// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
// kernel binary code).
MemTotal int64
// Amount of free memory.
MemFree int64
// Total amount of swap space available.
SwapTotal int64
// Amount of swap space that is currently unused.
SwapFree int64
}


@@ -0,0 +1,86 @@
//go:build freebsd && cgo
// +build freebsd,cgo
package system
import (
"errors"
"fmt"
"unsafe"
"golang.org/x/sys/unix"
)
// #include <unistd.h>
// #include <sys/vmmeter.h>
// #include <sys/sysctl.h>
// #include <vm/vm_param.h>
import "C"
func getMemInfo() (int64, int64, error) {
data, err := unix.SysctlRaw("vm.vmtotal")
if err != nil {
return -1, -1, fmt.Errorf("can't get kernel info: %w", err)
}
if len(data) != C.sizeof_struct_vmtotal {
return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data))
}
total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0]))
pagesize := int64(C.sysconf(C._SC_PAGESIZE))
npages := int64(C.sysconf(C._SC_PHYS_PAGES))
return pagesize * npages, pagesize * int64(total.t_free), nil
}
func getSwapInfo() (int64, int64, error) {
var (
total int64 = 0
used int64 = 0
)
swapCount, err := unix.SysctlUint32("vm.nswapdev")
if err != nil {
return -1, -1, fmt.Errorf("reading vm.nswapdev: %w", err)
}
for i := 0; i < int(swapCount); i++ {
data, err := unix.SysctlRaw("vm.swap_info", i)
if err != nil {
return -1, -1, fmt.Errorf("reading vm.swap_info.%d: %w", i, err)
}
if len(data) != C.sizeof_struct_xswdev {
return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data))
}
xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0]))
total += int64(xsw.xsw_nblks)
used += int64(xsw.xsw_used)
}
pagesize := int64(C.sysconf(C._SC_PAGESIZE))
return pagesize * total, pagesize * (total - used), nil
}
// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
MemTotal, MemFree, err := getMemInfo()
if err != nil {
return nil, fmt.Errorf("getting memory totals %w", err)
}
SwapTotal, SwapFree, err := getSwapInfo()
if err != nil {
return nil, fmt.Errorf("getting swap totals %w", err)
}
if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 {
return nil, errors.New("getting system memory info")
}
meminfo := &MemInfo{}
// Total memory is total physical memory less memory locked by the kernel
meminfo.MemTotal = MemTotal
meminfo.MemFree = MemFree
meminfo.SwapTotal = SwapTotal
meminfo.SwapFree = SwapFree
return meminfo, nil
}


@@ -0,0 +1,65 @@
package system
import (
"bufio"
"io"
"os"
"strconv"
"strings"
"github.com/docker/go-units"
)
// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
file, err := os.Open("/proc/meminfo")
if err != nil {
return nil, err
}
defer file.Close()
return parseMemInfo(file)
}
// parseMemInfo parses the /proc/meminfo file into
// a MemInfo object given an io.Reader to the file.
// It returns an error if there are problems reading from the file.
func parseMemInfo(reader io.Reader) (*MemInfo, error) {
meminfo := &MemInfo{}
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
// Expected format: ["MemTotal:", "1234", "kB"]
parts := strings.Fields(scanner.Text())
// Sanity checks: Skip malformed entries.
if len(parts) < 3 || parts[2] != "kB" {
continue
}
// Convert to bytes.
size, err := strconv.Atoi(parts[1])
if err != nil {
continue
}
bytes := int64(size) * units.KiB
switch parts[0] {
case "MemTotal:":
meminfo.MemTotal = bytes
case "MemFree:":
meminfo.MemFree = bytes
case "SwapTotal:":
meminfo.SwapTotal = bytes
case "SwapFree:":
meminfo.SwapFree = bytes
}
}
// Handle errors that may have occurred during the reading of the file.
if err := scanner.Err(); err != nil {
return nil, err
}
return meminfo, nil
}


@@ -0,0 +1,130 @@
//go:build solaris && cgo
// +build solaris,cgo
package system
import (
"fmt"
"unsafe"
)
// #cgo CFLAGS: -std=c99
// #cgo LDFLAGS: -lkstat
// #include <unistd.h>
// #include <stdlib.h>
// #include <stdio.h>
// #include <kstat.h>
// #include <sys/swap.h>
// #include <sys/param.h>
// struct swaptable *allocSwaptable(int num) {
// struct swaptable *st;
// struct swapent *swapent;
// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
// swapent = st->swt_ent;
// for (int i = 0; i < num; i++,swapent++) {
// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
// }
// st->swt_n = num;
// return st;
//}
// void freeSwaptable (struct swaptable *st) {
// struct swapent *swapent = st->swt_ent;
// for (int i = 0; i < st->swt_n; i++,swapent++) {
// free(swapent->ste_path);
// }
// free(st);
// }
// swapent_t getSwapEnt(swapent_t *ent, int i) {
// return ent[i];
// }
// int64_t getPpKernel() {
// int64_t pp_kernel = 0;
// kstat_ctl_t *ksc;
// kstat_t *ks;
// kstat_named_t *knp;
// kid_t kid;
//
// if ((ksc = kstat_open()) == NULL) {
// return -1;
// }
// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
// return -1;
// }
// if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
// return -1;
// }
// switch (knp->data_type) {
// case KSTAT_DATA_UINT64:
// pp_kernel = knp->value.ui64;
// break;
// case KSTAT_DATA_UINT32:
// pp_kernel = knp->value.ui32;
// break;
// }
// pp_kernel *= sysconf(_SC_PAGESIZE);
// return (pp_kernel > 0 ? pp_kernel : -1);
// }
import "C"
// Get the system memory info using sysconf, the same as prtconf does.
func getTotalMem() int64 {
pagesize := C.sysconf(C._SC_PAGESIZE)
npages := C.sysconf(C._SC_PHYS_PAGES)
return int64(pagesize * npages)
}
func getFreeMem() int64 {
pagesize := C.sysconf(C._SC_PAGESIZE)
npages := C.sysconf(C._SC_AVPHYS_PAGES)
return int64(pagesize * npages)
}
// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
ppKernel := C.getPpKernel()
MemTotal := getTotalMem()
MemFree := getFreeMem()
SwapTotal, SwapFree, err := getSysSwap()
if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
SwapFree < 0 {
return nil, fmt.Errorf("getting system memory info %w", err)
}
meminfo := &MemInfo{}
// Total memory is total physical memory less memory locked by the kernel
meminfo.MemTotal = MemTotal - int64(ppKernel)
meminfo.MemFree = MemFree
meminfo.SwapTotal = SwapTotal
meminfo.SwapFree = SwapFree
return meminfo, nil
}
func getSysSwap() (int64, int64, error) {
var tSwap int64
var fSwap int64
var diskblksPerPage int64
num, err := C.swapctl(C.SC_GETNSWP, nil)
if err != nil {
return -1, -1, err
}
st := C.allocSwaptable(num)
_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
if err != nil {
C.freeSwaptable(st)
return -1, -1, err
}
diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
for i := 0; i < int(num); i++ {
swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
tSwap += int64(swapent.ste_pages) * diskblksPerPage
fSwap += int64(swapent.ste_free) * diskblksPerPage
}
C.freeSwaptable(st)
return tSwap, fSwap, nil
}


@@ -0,0 +1,12 @@
//go:build !linux && !windows && !solaris && !(freebsd && cgo)
// +build !linux
// +build !windows
// +build !solaris
// +build !freebsd !cgo
package system
// ReadMemInfo is not supported on platforms other than linux, windows, solaris (with cgo), and freebsd (with cgo).
func ReadMemInfo() (*MemInfo, error) {
return nil, ErrNotSupportedPlatform
}


@@ -0,0 +1,46 @@
package system
import (
"unsafe"
"golang.org/x/sys/windows"
)
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
)
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
type memorystatusex struct {
dwLength uint32
dwMemoryLoad uint32
ullTotalPhys uint64
ullAvailPhys uint64
ullTotalPageFile uint64
ullAvailPageFile uint64
ullTotalVirtual uint64
ullAvailVirtual uint64
ullAvailExtendedVirtual uint64
}
// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
msi := &memorystatusex{
dwLength: 64,
}
r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
if r1 == 0 {
return &MemInfo{}, nil
}
return &MemInfo{
MemTotal: int64(msi.ullTotalPhys),
MemFree: int64(msi.ullAvailPhys),
SwapTotal: int64(msi.ullTotalPageFile),
SwapFree: int64(msi.ullAvailPageFile),
}, nil
}


@@ -0,0 +1,23 @@
//go:build !windows && !freebsd
// +build !windows,!freebsd
package system
import (
"golang.org/x/sys/unix"
)
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev.
func Mknod(path string, mode uint32, dev uint32) error {
return unix.Mknod(path, mode, int(dev))
}
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
// and minor number of the newly created device special file.
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor.
func Mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
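// For example, Mkdev(8, 1) yields 0x801: the low 8 bits of the minor occupy
// the low byte, the major occupies bits 8-19, and the high bits of the minor
// (none in this case) land in bits 20-31.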


@@ -0,0 +1,23 @@
//go:build freebsd
// +build freebsd
package system
import (
"golang.org/x/sys/unix"
)
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev.
func Mknod(path string, mode uint32, dev uint64) error {
return unix.Mknod(path, mode, dev)
}
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
// and minor number of the newly created device special file.
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor.
func Mkdev(major int64, minor int64) uint64 {
return uint64(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}


@@ -0,0 +1,14 @@
//go:build windows
// +build windows
package system
// Mknod is not implemented on Windows.
func Mknod(path string, mode uint32, dev int) error {
return ErrNotSupportedPlatform
}
// Mkdev is not implemented on Windows.
func Mkdev(major int64, minor int64) uint32 {
panic("Mkdev not implemented on Windows.")
}


@@ -0,0 +1,20 @@
package system
import "runtime"
const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
// DefaultPathEnv is the unix-style list of directories to search for
// executables. Each directory is separated from the next by a colon
// ':' character.
func DefaultPathEnv(platform string) string {
if runtime.GOOS == "windows" {
if platform != runtime.GOOS && LCOWSupported() {
return defaultUnixPathEnv
}
// Deliberately empty on Windows containers on Windows as the default path will be set by
// the container. Docker has no context of what the default path should be.
return ""
}
return defaultUnixPathEnv
}


@@ -0,0 +1,10 @@
//go:build !windows
// +build !windows
package system
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
// is the system drive. This is a no-op on Linux.
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
return path, nil
}


@@ -0,0 +1,34 @@
//go:build windows
// +build windows
package system
import (
"fmt"
"path/filepath"
"strings"
)
// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
// This is used, for example, when validating a user provided path in docker cp.
// If a drive letter is supplied, it must be the system drive. The drive letter
// is always removed. Also, it translates the path to OS semantics (i.e. / to \). We
// need the path in this syntax so that it can ultimately be concatenated with
// a Windows long-path which doesn't support drive-letters. Examples:
// C: --> Fail
// C:\ --> \
// a --> a
// /a --> \a
// d:\ --> Fail
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
if len(path) == 2 && string(path[1]) == ":" {
return "", fmt.Errorf("relative path not specified in %q", path)
}
if !filepath.IsAbs(path) || len(path) < 2 {
return filepath.FromSlash(path), nil
}
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
return "", fmt.Errorf("specified path is not on the system drive (C:)")
}
return filepath.FromSlash(path[2:]), nil
}


@@ -0,0 +1,25 @@
//go:build linux || freebsd || solaris || darwin
// +build linux freebsd solaris darwin
package system
import (
"syscall"
"golang.org/x/sys/unix"
)
// IsProcessAlive returns true if the process with the given pid is running.
func IsProcessAlive(pid int) bool {
err := unix.Kill(pid, syscall.Signal(0))
if err == nil || err == unix.EPERM {
return true
}
return false
}
// KillProcess force-stops a process.
func KillProcess(pid int) {
_ = unix.Kill(pid, unix.SIGKILL)
}

vendor/github.com/containers/storage/pkg/system/rm.go

@@ -0,0 +1,99 @@
package system
import (
"errors"
"fmt"
"os"
"syscall"
"time"
"github.com/containers/storage/pkg/mount"
"github.com/sirupsen/logrus"
)
// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
// often be remedied.
// Only use `EnsureRemoveAll` if you really want to make every effort to remove
// a directory.
//
// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
// can be a race between reading directory entries and then actually attempting
// to remove everything in the directory.
// These types of errors do not need to be returned since it's ok for the dir
// to be gone; we can just retry the remove operation.
//
// This should never return an `os.ErrNotExist` kind of error under any
// circumstances.
func EnsureRemoveAll(dir string) error {
notExistErr := make(map[string]bool)
// track retries
exitOnErr := make(map[string]int)
maxRetry := 1000
// Attempt a simple remove all first, this avoids the more expensive
// RecursiveUnmount call if not needed.
if err := os.RemoveAll(dir); err == nil {
return nil
}
// Attempt to unmount anything beneath this dir first
if err := mount.RecursiveUnmount(dir); err != nil {
logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err)
}
for {
err := os.RemoveAll(dir)
if err == nil {
return nil
}
// If the RemoveAll fails with a permission error, we
// may have immutable files, so try to remove the
// immutable flag and redo the RemoveAll.
if errors.Is(err, syscall.EPERM) {
if err = resetFileFlags(dir); err != nil {
return fmt.Errorf("resetting file flags: %w", err)
}
err = os.RemoveAll(dir)
if err == nil {
return nil
}
}
pe, ok := err.(*os.PathError)
if !ok {
return err
}
if os.IsNotExist(err) {
if notExistErr[pe.Path] {
return err
}
notExistErr[pe.Path] = true
// There is a race where a subdir can be removed after the parent
// dir entries have been read.
// So the path could be from `os.Remove(subdir)`
// If the reported non-existent path is not the passed in `dir` we
// should just retry, but otherwise return with no error.
if pe.Path == dir {
return nil
}
continue
}
if !IsEBUSY(pe.Err) {
return err
}
if e := mount.Unmount(pe.Path); e != nil {
return fmt.Errorf("while removing %s: %w", dir, e)
}
if exitOnErr[pe.Path] == maxRetry {
return err
}
exitOnErr[pe.Path]++
time.Sleep(10 * time.Millisecond)
}
}


@@ -0,0 +1,10 @@
//go:build !freebsd
// +build !freebsd
package system
// resetFileFlags resets file flags in a directory tree, allowing
// EnsureRemoveAll to delete trees which have the immutable flag set.
func resetFileFlags(dir string) error {
return nil
}


@@ -0,0 +1,17 @@
package system
import (
"io/fs"
"path/filepath"
)
// resetFileFlags resets file flags in a directory tree, allowing
// EnsureRemoveAll to delete trees which have the immutable flag set.
func resetFileFlags(dir string) error {
return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err := Lchflags(path, 0); err != nil {
return err
}
return nil
})
}


@@ -0,0 +1,12 @@
//go:build !freebsd
// +build !freebsd
package system
type platformStatT struct{}
// Flags returns the file flags if supported, or zero otherwise
func (s StatT) Flags() uint32 {
_ = s.platformStatT // Silence warnings that StatT.platformStatT is unused (on these platforms)
return 0
}


@@ -0,0 +1,15 @@
package system
import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.StatT type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{
size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtimespec,
}, nil
}
