Dataset schema (per-column type and value/length range):

column               type            min     max
repo_name            string length   8       38
pr_number            int64           3       47.1k
pr_title             string length   8       175
pr_description       string length   2       19.8k
author               null            -       -
date_created         string length   25      25
date_merged          string length   25      25
filepath             string length   6       136
before_content       string length   54      884k
after_content        string length   56      884k
pr_author            string length   3       21
previous_commit      string length   40      40
pr_commit            string length   40      40
comment              string length   2       25.4k
comment_author       string length   3       29
__index_level_0__    int64           0       5.1k
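The schema maps onto one flat record per review comment. As a hypothetical sketch (field names are taken from the schema; the JSON tags and the package name are assumptions about an exported format, not part of the dataset itself), a Go struct mirroring a row could look like this:

```go
// Package reviewdata sketches a row of the PR review-comment dataset described above.
package reviewdata

// ReviewRow mirrors the column schema; one row per review comment.
type ReviewRow struct {
	RepoName       string  `json:"repo_name"`
	PRNumber       int64   `json:"pr_number"`
	PRTitle        string  `json:"pr_title"`
	PRDescription  string  `json:"pr_description"`
	Author         *string `json:"author"` // null in the rows shown
	DateCreated    string  `json:"date_created"` // 25-char timestamp
	DateMerged     string  `json:"date_merged"`  // 25-char timestamp
	Filepath       string  `json:"filepath"`
	BeforeContent  string  `json:"before_content"` // full file before the PR commit
	AfterContent   string  `json:"after_content"`  // full file after the PR commit
	PRAuthor       string  `json:"pr_author"`
	PreviousCommit string  `json:"previous_commit"` // 40-char SHA
	PRCommit       string  `json:"pr_commit"`       // 40-char SHA
	Comment        string  `json:"comment"`
	CommentAuthor  string  `json:"comment_author"`
	Index          int64   `json:"__index_level_0__"`
}
```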
moby/moby
42,996
Fix windows rxReservedNames
This regex is currently matching volumes that include a reserved word (e.g. test-aux-volume). **- Description for the changelog** Fixes rejecting Windows volume names that merely contain a reserved word instead of matching exactly.
null
2021-11-05 21:33:19+00:00
2021-11-16 08:35:58+00:00
volume/mounts/windows_parser.go
package mounts // import "github.com/docker/docker/volume/mounts" import ( "errors" "fmt" "os" "regexp" "runtime" "strings" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/pkg/stringid" ) // NewWindowsParser creates a parser with Windows semantics. func NewWindowsParser() Parser { return &windowsParser{ fi: defaultFileInfoProvider{}, } } type windowsParser struct { fi fileInfoProvider } const ( // Spec should be in the format [source:]destination[:mode] // // Examples: c:\foo bar:d:rw // c:\foo:d:\bar // myname:d: // d:\ // // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to // test is https://regex-golang.appspot.com/assets/html/index.html // // Useful link for referencing named capturing groups: // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex // // There are three match groups: source, destination and mode. // // rxHostDir is the first option of a source rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*` // rxName is the second option of a source rxName = `[^\\/:*?"<>|\r\n]+` // RXReservedNames are reserved names not possible on Windows rxReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` // rxPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \) rxPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+` // rxSource is the combined possibilities for a source rxSource = `((?P<source>((` + rxHostDir + `)|(` + rxName + `)|(` + rxPipe + `))):)?` // Source. Can be either a host directory, a name, or omitted: // HostDir: // - Essentially using the folder solution from // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html // but adding case insensitivity. // - Must be an absolute path such as c:\path // - Can include spaces such as `c:\program files` // - And then followed by a colon which is not in the capture group // - And can be optional // Name: // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) // - And then followed by a colon which is not in the capture group // - And can be optional // rxDestination is the regex expression for the mount destination rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))` // rxMode is the regex expression for the mode of the mount // Mode (optional): // - Hopefully self explanatory in comparison to above regex's. 
// - Colon is not in the capture group rxMode = `(:(?P<mode>(?i)ro|rw))?` ) var ( volumeNameRegexp = regexp.MustCompile(`^` + rxName + `$`) reservedNameRegexp = regexp.MustCompile(`^` + rxReservedNames + `$`) hostDirRegexp = regexp.MustCompile(`^` + rxHostDir + `$`) mountDestinationRegexp = regexp.MustCompile(`^` + rxDestination + `$`) windowsSplitRawSpecRegexp = regexp.MustCompile(`^` + rxSource + rxDestination + rxMode + `$`) ) type mountValidator func(mnt *mount.Mount) error func (p *windowsParser) splitRawSpec(raw string, splitRegexp *regexp.Regexp) ([]string, error) { match := splitRegexp.FindStringSubmatch(strings.ToLower(raw)) if len(match) == 0 { return nil, errInvalidSpec(raw) } var split []string matchgroups := make(map[string]string) // Pull out the sub expressions from the named capture groups for i, name := range splitRegexp.SubexpNames() { matchgroups[name] = strings.ToLower(match[i]) } if source, exists := matchgroups["source"]; exists { if source != "" { split = append(split, source) } } if destination, exists := matchgroups["destination"]; exists { if destination != "" { split = append(split, destination) } } if mode, exists := matchgroups["mode"]; exists { if mode != "" { split = append(split, mode) } } // Fix #26329. If the destination appears to be a file, and the source is null, // it may be because we've fallen through the possible naming regex and hit a // situation where the user intention was to map a file into a container through // a local volume, but this is not supported by the platform. if matchgroups["source"] == "" && matchgroups["destination"] != "" { if volumeNameRegexp.MatchString(matchgroups["destination"]) { if reservedNameRegexp.MatchString(matchgroups["destination"]) { return nil, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", matchgroups["destination"]) } } else { exists, isDir, _ := p.fi.fileInfo(matchgroups["destination"]) if exists && !isDir { return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"]) } } } return split, nil } func windowsValidMountMode(mode string) bool { if mode == "" { return true } // TODO should windows mounts produce an error if any mode was provided (they're a no-op on windows) return rwModes[strings.ToLower(mode)] } func windowsValidateNotRoot(p string) error { p = strings.ToLower(strings.Replace(p, `/`, `\`, -1)) if p == "c:" || p == `c:\` { return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) } return nil } var windowsValidators mountValidator = func(m *mount.Mount) error { if err := windowsValidateNotRoot(m.Target); err != nil { return err } if !mountDestinationRegexp.MatchString(strings.ToLower(m.Target)) { return fmt.Errorf("invalid mount path: '%s'", m.Target) } return nil } func windowsValidateAbsolute(p string) error { if !mountDestinationRegexp.MatchString(strings.ToLower(p)) { return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) } return nil } func windowsDetectMountType(p string) mount.Type { if strings.HasPrefix(p, `\\.\pipe\`) { return mount.TypeNamedPipe } else if hostDirRegexp.MatchString(p) { return mount.TypeBind } else { return mount.TypeVolume } } func (p *windowsParser) ReadWrite(mode string) bool { return strings.ToLower(mode) != "ro" } // ValidateVolumeName checks a volume name in a platform specific manner. 
func (p *windowsParser) ValidateVolumeName(name string) error { if !volumeNameRegexp.MatchString(name) { return errors.New("invalid volume name") } if reservedNameRegexp.MatchString(name) { return fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) } return nil } func (p *windowsParser) ValidateMountConfig(mnt *mount.Mount) error { return p.validateMountConfigReg(mnt, windowsValidators) } type fileInfoProvider interface { fileInfo(path string) (exist, isDir bool, err error) } type defaultFileInfoProvider struct { } func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) { fi, err := os.Stat(path) if err != nil { if !os.IsNotExist(err) { return false, false, err } return false, false, nil } return true, fi.IsDir(), nil } func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, additionalValidators ...mountValidator) error { if len(mnt.Target) == 0 { return &errMountConfig{mnt, errMissingField("Target")} } for _, v := range additionalValidators { if err := v(mnt); err != nil { return &errMountConfig{mnt, err} } } switch mnt.Type { case mount.TypeBind: if len(mnt.Source) == 0 { return &errMountConfig{mnt, errMissingField("Source")} } // Don't error out just because the propagation mode is not supported on the platform if opts := mnt.BindOptions; opts != nil { if len(opts.Propagation) > 0 { return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} } } if mnt.VolumeOptions != nil { return &errMountConfig{mnt, errExtraField("VolumeOptions")} } if err := windowsValidateAbsolute(mnt.Source); err != nil { return &errMountConfig{mnt, err} } exists, isdir, err := p.fi.fileInfo(mnt.Source) if err != nil { return &errMountConfig{mnt, err} } if !exists { return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} } if !isdir { return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")} } case mount.TypeVolume: if mnt.BindOptions != nil { return &errMountConfig{mnt, errExtraField("BindOptions")} } if len(mnt.Source) == 0 && mnt.ReadOnly { return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} } if len(mnt.Source) != 0 { if err := p.ValidateVolumeName(mnt.Source); err != nil { return &errMountConfig{mnt, err} } } case mount.TypeNamedPipe: if len(mnt.Source) == 0 { return &errMountConfig{mnt, errMissingField("Source")} } if mnt.BindOptions != nil { return &errMountConfig{mnt, errExtraField("BindOptions")} } if mnt.ReadOnly { return &errMountConfig{mnt, errExtraField("ReadOnly")} } if windowsDetectMountType(mnt.Source) != mount.TypeNamedPipe { return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)} } if windowsDetectMountType(mnt.Target) != mount.TypeNamedPipe { return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)} } default: return &errMountConfig{mnt, errors.New("mount type unknown")} } return nil } func (p *windowsParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { arr, err := p.splitRawSpec(raw, windowsSplitRawSpecRegexp) if err != nil { return nil, err } return p.parseMount(arr, raw, volumeDriver, true, windowsValidators) } func (p *windowsParser) parseMount(arr []string, raw, volumeDriver string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { var spec mount.Mount var mode string switch len(arr) { case 1: // Just a destination path in the container spec.Target = arr[0] case 2: if windowsValidMountMode(arr[1]) { // 
Destination + Mode is not a valid volume - volumes // cannot include a mode. e.g. /foo:rw return nil, errInvalidSpec(raw) } // Host Source Path or Name + Destination spec.Source = strings.Replace(arr[0], `/`, `\`, -1) spec.Target = arr[1] case 3: // HostSourcePath+DestinationPath+Mode spec.Source = strings.Replace(arr[0], `/`, `\`, -1) spec.Target = arr[1] mode = arr[2] default: return nil, errInvalidSpec(raw) } if convertTargetToBackslash { spec.Target = strings.Replace(spec.Target, `/`, `\`, -1) } if !windowsValidMountMode(mode) { return nil, errInvalidMode(mode) } spec.Type = windowsDetectMountType(spec.Source) spec.ReadOnly = !p.ReadWrite(mode) // cannot assume that if a volume driver is passed in that we should set it if volumeDriver != "" && spec.Type == mount.TypeVolume { spec.VolumeOptions = &mount.VolumeOptions{ DriverConfig: &mount.Driver{Name: volumeDriver}, } } if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { if spec.VolumeOptions == nil { spec.VolumeOptions = &mount.VolumeOptions{} } spec.VolumeOptions.NoCopy = !copyData } mp, err := p.parseMountSpec(spec, convertTargetToBackslash, additionalValidators...) if mp != nil { mp.Mode = mode } if err != nil { err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) } return mp, err } func (p *windowsParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { return p.parseMountSpec(cfg, true, windowsValidators) } func (p *windowsParser) parseMountSpec(cfg mount.Mount, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { if err := p.validateMountConfigReg(&cfg, additionalValidators...); err != nil { return nil, err } mp := &MountPoint{ RW: !cfg.ReadOnly, Destination: cfg.Target, Type: cfg.Type, Spec: cfg, } if convertTargetToBackslash { mp.Destination = strings.Replace(cfg.Target, `/`, `\`, -1) } switch cfg.Type { case mount.TypeVolume: if cfg.Source == "" { mp.Name = stringid.GenerateRandomID() } else { mp.Name = cfg.Source } mp.CopyData = p.DefaultCopyMode() if cfg.VolumeOptions != nil { if cfg.VolumeOptions.DriverConfig != nil { mp.Driver = cfg.VolumeOptions.DriverConfig.Name } if cfg.VolumeOptions.NoCopy { mp.CopyData = false } } case mount.TypeBind: mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) case mount.TypeNamedPipe: mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) } // cleanup trailing `\` except for paths like `c:\` if len(mp.Source) > 3 && mp.Source[len(mp.Source)-1] == '\\' { mp.Source = mp.Source[:len(mp.Source)-1] } if len(mp.Destination) > 3 && mp.Destination[len(mp.Destination)-1] == '\\' { mp.Destination = mp.Destination[:len(mp.Destination)-1] } return mp, nil } func (p *windowsParser) ParseVolumesFrom(spec string) (string, string, error) { if len(spec) == 0 { return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") } specParts := strings.SplitN(spec, ":", 2) id := specParts[0] mode := "rw" if len(specParts) == 2 { mode = specParts[1] if !windowsValidMountMode(mode) { return "", "", errInvalidMode(mode) } // Do not allow copy modes on volumes-from if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { return "", "", errInvalidMode(mode) } } return id, mode, nil } func (p *windowsParser) DefaultPropagationMode() mount.Propagation { return "" } func (p *windowsParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) } func (p *windowsParser) DefaultCopyMode() bool { return false } func (p *windowsParser) 
IsBackwardCompatible(m *MountPoint) bool { return false } func (p *windowsParser) ValidateTmpfsMountDestination(dest string) error { return errors.New("platform does not support tmpfs") } func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { return false }
package mounts // import "github.com/docker/docker/volume/mounts" import ( "errors" "fmt" "os" "regexp" "runtime" "strings" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/pkg/stringid" ) // NewWindowsParser creates a parser with Windows semantics. func NewWindowsParser() Parser { return &windowsParser{ fi: defaultFileInfoProvider{}, } } type windowsParser struct { fi fileInfoProvider } const ( // Spec should be in the format [source:]destination[:mode] // // Examples: c:\foo bar:d:rw // c:\foo:d:\bar // myname:d: // d:\ // // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to // test is https://regex-golang.appspot.com/assets/html/index.html // // Useful link for referencing named capturing groups: // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex // // There are three match groups: source, destination and mode. // // rxHostDir is the first option of a source rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*` // rxName is the second option of a source rxName = `[^\\/:*?"<>|\r\n]+` // RXReservedNames are reserved names not possible on Windows rxReservedNames = `(con|prn|nul|aux|com[1-9]|lpt[1-9])` // rxPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \) rxPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+` // rxSource is the combined possibilities for a source rxSource = `((?P<source>((` + rxHostDir + `)|(` + rxName + `)|(` + rxPipe + `))):)?` // Source. Can be either a host directory, a name, or omitted: // HostDir: // - Essentially using the folder solution from // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html // but adding case insensitivity. // - Must be an absolute path such as c:\path // - Can include spaces such as `c:\program files` // - And then followed by a colon which is not in the capture group // - And can be optional // Name: // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) // - And then followed by a colon which is not in the capture group // - And can be optional // rxDestination is the regex expression for the mount destination rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))` // rxMode is the regex expression for the mode of the mount // Mode (optional): // - Hopefully self explanatory in comparison to above regex's. 
// - Colon is not in the capture group rxMode = `(:(?P<mode>(?i)ro|rw))?` ) var ( volumeNameRegexp = regexp.MustCompile(`^` + rxName + `$`) reservedNameRegexp = regexp.MustCompile(`^` + rxReservedNames + `$`) hostDirRegexp = regexp.MustCompile(`^` + rxHostDir + `$`) mountDestinationRegexp = regexp.MustCompile(`^` + rxDestination + `$`) windowsSplitRawSpecRegexp = regexp.MustCompile(`^` + rxSource + rxDestination + rxMode + `$`) ) type mountValidator func(mnt *mount.Mount) error func (p *windowsParser) splitRawSpec(raw string, splitRegexp *regexp.Regexp) ([]string, error) { match := splitRegexp.FindStringSubmatch(strings.ToLower(raw)) if len(match) == 0 { return nil, errInvalidSpec(raw) } var split []string matchgroups := make(map[string]string) // Pull out the sub expressions from the named capture groups for i, name := range splitRegexp.SubexpNames() { matchgroups[name] = strings.ToLower(match[i]) } if source, exists := matchgroups["source"]; exists { if source != "" { split = append(split, source) } } if destination, exists := matchgroups["destination"]; exists { if destination != "" { split = append(split, destination) } } if mode, exists := matchgroups["mode"]; exists { if mode != "" { split = append(split, mode) } } // Fix #26329. If the destination appears to be a file, and the source is null, // it may be because we've fallen through the possible naming regex and hit a // situation where the user intention was to map a file into a container through // a local volume, but this is not supported by the platform. if matchgroups["source"] == "" && matchgroups["destination"] != "" { if volumeNameRegexp.MatchString(matchgroups["destination"]) { if reservedNameRegexp.MatchString(matchgroups["destination"]) { return nil, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", matchgroups["destination"]) } } else { exists, isDir, _ := p.fi.fileInfo(matchgroups["destination"]) if exists && !isDir { return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"]) } } } return split, nil } func windowsValidMountMode(mode string) bool { if mode == "" { return true } // TODO should windows mounts produce an error if any mode was provided (they're a no-op on windows) return rwModes[strings.ToLower(mode)] } func windowsValidateNotRoot(p string) error { p = strings.ToLower(strings.Replace(p, `/`, `\`, -1)) if p == "c:" || p == `c:\` { return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) } return nil } var windowsValidators mountValidator = func(m *mount.Mount) error { if err := windowsValidateNotRoot(m.Target); err != nil { return err } if !mountDestinationRegexp.MatchString(strings.ToLower(m.Target)) { return fmt.Errorf("invalid mount path: '%s'", m.Target) } return nil } func windowsValidateAbsolute(p string) error { if !mountDestinationRegexp.MatchString(strings.ToLower(p)) { return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) } return nil } func windowsDetectMountType(p string) mount.Type { if strings.HasPrefix(p, `\\.\pipe\`) { return mount.TypeNamedPipe } else if hostDirRegexp.MatchString(p) { return mount.TypeBind } else { return mount.TypeVolume } } func (p *windowsParser) ReadWrite(mode string) bool { return strings.ToLower(mode) != "ro" } // ValidateVolumeName checks a volume name in a platform specific manner. 
func (p *windowsParser) ValidateVolumeName(name string) error { if !volumeNameRegexp.MatchString(name) { return errors.New("invalid volume name") } if reservedNameRegexp.MatchString(name) { return fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) } return nil } func (p *windowsParser) ValidateMountConfig(mnt *mount.Mount) error { return p.validateMountConfigReg(mnt, windowsValidators) } type fileInfoProvider interface { fileInfo(path string) (exist, isDir bool, err error) } type defaultFileInfoProvider struct { } func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) { fi, err := os.Stat(path) if err != nil { if !os.IsNotExist(err) { return false, false, err } return false, false, nil } return true, fi.IsDir(), nil } func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, additionalValidators ...mountValidator) error { if len(mnt.Target) == 0 { return &errMountConfig{mnt, errMissingField("Target")} } for _, v := range additionalValidators { if err := v(mnt); err != nil { return &errMountConfig{mnt, err} } } switch mnt.Type { case mount.TypeBind: if len(mnt.Source) == 0 { return &errMountConfig{mnt, errMissingField("Source")} } // Don't error out just because the propagation mode is not supported on the platform if opts := mnt.BindOptions; opts != nil { if len(opts.Propagation) > 0 { return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} } } if mnt.VolumeOptions != nil { return &errMountConfig{mnt, errExtraField("VolumeOptions")} } if err := windowsValidateAbsolute(mnt.Source); err != nil { return &errMountConfig{mnt, err} } exists, isdir, err := p.fi.fileInfo(mnt.Source) if err != nil { return &errMountConfig{mnt, err} } if !exists { return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} } if !isdir { return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")} } case mount.TypeVolume: if mnt.BindOptions != nil { return &errMountConfig{mnt, errExtraField("BindOptions")} } if len(mnt.Source) == 0 && mnt.ReadOnly { return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} } if len(mnt.Source) != 0 { if err := p.ValidateVolumeName(mnt.Source); err != nil { return &errMountConfig{mnt, err} } } case mount.TypeNamedPipe: if len(mnt.Source) == 0 { return &errMountConfig{mnt, errMissingField("Source")} } if mnt.BindOptions != nil { return &errMountConfig{mnt, errExtraField("BindOptions")} } if mnt.ReadOnly { return &errMountConfig{mnt, errExtraField("ReadOnly")} } if windowsDetectMountType(mnt.Source) != mount.TypeNamedPipe { return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)} } if windowsDetectMountType(mnt.Target) != mount.TypeNamedPipe { return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)} } default: return &errMountConfig{mnt, errors.New("mount type unknown")} } return nil } func (p *windowsParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { arr, err := p.splitRawSpec(raw, windowsSplitRawSpecRegexp) if err != nil { return nil, err } return p.parseMount(arr, raw, volumeDriver, true, windowsValidators) } func (p *windowsParser) parseMount(arr []string, raw, volumeDriver string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { var spec mount.Mount var mode string switch len(arr) { case 1: // Just a destination path in the container spec.Target = arr[0] case 2: if windowsValidMountMode(arr[1]) { // 
Destination + Mode is not a valid volume - volumes // cannot include a mode. e.g. /foo:rw return nil, errInvalidSpec(raw) } // Host Source Path or Name + Destination spec.Source = strings.Replace(arr[0], `/`, `\`, -1) spec.Target = arr[1] case 3: // HostSourcePath+DestinationPath+Mode spec.Source = strings.Replace(arr[0], `/`, `\`, -1) spec.Target = arr[1] mode = arr[2] default: return nil, errInvalidSpec(raw) } if convertTargetToBackslash { spec.Target = strings.Replace(spec.Target, `/`, `\`, -1) } if !windowsValidMountMode(mode) { return nil, errInvalidMode(mode) } spec.Type = windowsDetectMountType(spec.Source) spec.ReadOnly = !p.ReadWrite(mode) // cannot assume that if a volume driver is passed in that we should set it if volumeDriver != "" && spec.Type == mount.TypeVolume { spec.VolumeOptions = &mount.VolumeOptions{ DriverConfig: &mount.Driver{Name: volumeDriver}, } } if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { if spec.VolumeOptions == nil { spec.VolumeOptions = &mount.VolumeOptions{} } spec.VolumeOptions.NoCopy = !copyData } mp, err := p.parseMountSpec(spec, convertTargetToBackslash, additionalValidators...) if mp != nil { mp.Mode = mode } if err != nil { err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) } return mp, err } func (p *windowsParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { return p.parseMountSpec(cfg, true, windowsValidators) } func (p *windowsParser) parseMountSpec(cfg mount.Mount, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { if err := p.validateMountConfigReg(&cfg, additionalValidators...); err != nil { return nil, err } mp := &MountPoint{ RW: !cfg.ReadOnly, Destination: cfg.Target, Type: cfg.Type, Spec: cfg, } if convertTargetToBackslash { mp.Destination = strings.Replace(cfg.Target, `/`, `\`, -1) } switch cfg.Type { case mount.TypeVolume: if cfg.Source == "" { mp.Name = stringid.GenerateRandomID() } else { mp.Name = cfg.Source } mp.CopyData = p.DefaultCopyMode() if cfg.VolumeOptions != nil { if cfg.VolumeOptions.DriverConfig != nil { mp.Driver = cfg.VolumeOptions.DriverConfig.Name } if cfg.VolumeOptions.NoCopy { mp.CopyData = false } } case mount.TypeBind: mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) case mount.TypeNamedPipe: mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) } // cleanup trailing `\` except for paths like `c:\` if len(mp.Source) > 3 && mp.Source[len(mp.Source)-1] == '\\' { mp.Source = mp.Source[:len(mp.Source)-1] } if len(mp.Destination) > 3 && mp.Destination[len(mp.Destination)-1] == '\\' { mp.Destination = mp.Destination[:len(mp.Destination)-1] } return mp, nil } func (p *windowsParser) ParseVolumesFrom(spec string) (string, string, error) { if len(spec) == 0 { return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") } specParts := strings.SplitN(spec, ":", 2) id := specParts[0] mode := "rw" if len(specParts) == 2 { mode = specParts[1] if !windowsValidMountMode(mode) { return "", "", errInvalidMode(mode) } // Do not allow copy modes on volumes-from if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { return "", "", errInvalidMode(mode) } } return id, mode, nil } func (p *windowsParser) DefaultPropagationMode() mount.Propagation { return "" } func (p *windowsParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) } func (p *windowsParser) DefaultCopyMode() bool { return false } func (p *windowsParser) 
IsBackwardCompatible(m *MountPoint) bool { return false } func (p *windowsParser) ValidateTmpfsMountDestination(dest string) error { return errors.New("platform does not support tmpfs") } func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { return false }
robert-shade
40ccedd61b9d8b64fc75d64cd57b9ee35c769632
e9ab1d425638af916b84d6e0f7f87ef6fa6e6ca9
It does need them - that was the original bug. `^con|prn|nul|aux|com[1-9]|lpt[1-9]$` matches `test-aux-volume`, while `^(con|prn|nul|aux|com[1-9]|lpt[1-9])$` does not.
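As a standalone illustration (not part of the PR itself), the snippet below shows why the ungrouped pattern misbehaves: the `^` anchor binds only to the first alternative and the `$` only to the last, so the middle alternatives match anywhere in the name.

```go
// Demonstrates why the ungrouped reserved-names pattern matches "test-aux-volume":
// in `^con|prn|...|lpt[1-9]$`, `^` applies only to "con" and `$` only to "lpt[1-9]",
// so "aux" can match as a bare substring. Grouping the alternation fixes this.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	ungrouped := regexp.MustCompile(`^con|prn|nul|aux|com[1-9]|lpt[1-9]$`)
	grouped := regexp.MustCompile(`^(con|prn|nul|aux|com[1-9]|lpt[1-9])$`)

	fmt.Println(ungrouped.MatchString("test-aux-volume")) // true  (the bug)
	fmt.Println(grouped.MatchString("test-aux-volume"))   // false (fixed)
	fmt.Println(grouped.MatchString("aux"))               // true  (exact reserved name still rejected)
}
```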
robert-shade
4,398
moby/moby
42,980
vendor: github.com/moby/sys/mount v0.3.0, mountinfo v0.5.0, signal v0.6.0, symlink v0.2.0
### vendor: golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 full diff: https://github.com/golang/sys/compare/63515b42dcdf9544f4e6a02fd7632793fde2f72d...69cdffdb9359ff97d91e4f4fbb6b2714c3898eae ### vendor: github.com/moby/sys/mount v0.3.0, mountinfo v0.5.0, signal v0.6.0, symlink v0.2.0 full diff: https://github.com/moby/sys/compare/signal/v0.5.0...signal/v0.6.0 Modules: - github.com/moby/sys/mount v0.3.0 - github.com/moby/sys/mountinfo v0.5.0 - github.com/moby/sys/signal v0.6.0 - github.com/moby/sys/symlink v0.2.0
null
2021-11-01 13:43:16+00:00
2021-11-19 12:23:51+00:00
integration/container/kill_test.go
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestKillContainerInvalidSignal(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, "0") assert.Error(t, err, "Error response from daemon: Invalid signal: 0") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIG42") assert.Error(t, err, "Error response from daemon: Invalid signal: SIG42") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) } func TestKillContainer(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string signal string status string skipOs string }{ { doc: "no signal", signal: "", status: "exited", skipOs: "", }, { doc: "non killing signal", signal: "SIGWINCH", status: "running", skipOs: "windows", }, { doc: "killing signal", signal: "SIGTERM", status: "exited", skipOs: "", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { skip.If(t, testEnv.OSType == tc.skipOs, "Windows does not support SIGWINCH") ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, tc.signal) assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string stopsignal string status string }{ { doc: "same-signal-disables-restart-policy", stopsignal: "TERM", status: "exited", }, { doc: "different-signal-keep-restart-policy", stopsignal: "CONT", status: "running", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { ctx := context.Background() id := container.Run(ctx, t, client, container.WithRestartPolicy("always"), func(c *container.TestContainerConfig) { c.Config.StopSignal = tc.stopsignal }) err := client.ContainerKill(ctx, id, "TERM") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillStoppedContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.Assert(t, is.ErrorContains(err, "")) assert.Assert(t, is.Contains(err.Error(), "is not running")) } func TestKillStoppedContainerAPIPre120(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) } func TestKillDifferentUserContainer(t *testing.T) { // TODO Windows: Windows does not yet support -u (Feb 2016). 
skip.If(t, testEnv.OSType == "windows", "User containers (container.Config.User) are not yet supported on %q platform", testEnv.OSType) defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { c.Config.User = "daemon" }) poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) } func TestInspectOomKilledTrue(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") skip.If(t, testEnv.DaemonInfo.CgroupDriver == "none") skip.If(t, !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) skip.If(t, testEnv.DaemonInfo.CgroupVersion == "2", "FIXME: flaky on cgroup v2 (https://github.com/moby/moby/issues/41929)") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { c.HostConfig.Resources.Memory = 32 * 1024 * 1024 }) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) } func TestInspectOomKilledFalse(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) }
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestKillContainerInvalidSignal(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, "0") assert.ErrorContains(t, err, "Error response from daemon:") assert.ErrorContains(t, err, "nvalid signal: 0") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons. poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIG42") assert.ErrorContains(t, err, "Error response from daemon:") assert.ErrorContains(t, err, "nvalid signal: SIG42") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons. poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) } func TestKillContainer(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string signal string status string skipOs string }{ { doc: "no signal", signal: "", status: "exited", skipOs: "", }, { doc: "non killing signal", signal: "SIGWINCH", status: "running", skipOs: "windows", }, { doc: "killing signal", signal: "SIGTERM", status: "exited", skipOs: "", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { skip.If(t, testEnv.OSType == tc.skipOs, "Windows does not support SIGWINCH") ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, tc.signal) assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string stopsignal string status string }{ { doc: "same-signal-disables-restart-policy", stopsignal: "TERM", status: "exited", }, { doc: "different-signal-keep-restart-policy", stopsignal: "CONT", status: "running", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { ctx := context.Background() id := container.Run(ctx, t, client, container.WithRestartPolicy("always"), func(c *container.TestContainerConfig) { c.Config.StopSignal = tc.stopsignal }) err := client.ContainerKill(ctx, id, "TERM") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillStoppedContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.Assert(t, is.ErrorContains(err, "")) assert.Assert(t, is.Contains(err.Error(), "is not running")) } func TestKillStoppedContainerAPIPre120(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Create(ctx, t, client) 
err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) } func TestKillDifferentUserContainer(t *testing.T) { // TODO Windows: Windows does not yet support -u (Feb 2016). skip.If(t, testEnv.OSType == "windows", "User containers (container.Config.User) are not yet supported on %q platform", testEnv.OSType) defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { c.Config.User = "daemon" }) poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) } func TestInspectOomKilledTrue(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") skip.If(t, testEnv.DaemonInfo.CgroupDriver == "none") skip.If(t, !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) skip.If(t, testEnv.DaemonInfo.CgroupVersion == "2", "FIXME: flaky on cgroup v2 (https://github.com/moby/moby/issues/41929)") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { c.HostConfig.Resources.Memory = 32 * 1024 * 1024 }) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) } func TestInspectOomKilledFalse(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) }
thaJeztah
4fafb27799145fa2f3b58503452a2f33864b7ab9
f06314fa842abdb76eafa0f3d204b02cc1290b0c
Should this be "nvalid" or "invalid"?
samuelkarp
4,399
moby/moby
42,980
vendor: github.com/moby/sys/mount v0.3.0, mountinfo v0.5.0, signal v0.6.0, symlink v0.2.0
### vendor: golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 full diff: https://github.com/golang/sys/compare/63515b42dcdf9544f4e6a02fd7632793fde2f72d...69cdffdb9359ff97d91e4f4fbb6b2714c3898eae ### vendor: github.com/moby/sys/mount v0.3.0, mountinfo v0.5.0, signal v0.6.0, symlink v0.2.0 full diff: https://github.com/moby/sys/compare/signal/v0.5.0...signal/v0.6.0 Modules: - github.com/moby/sys/mount v0.3.0 - github.com/moby/sys/mountinfo v0.5.0 - github.com/moby/sys/signal v0.6.0 - github.com/moby/sys/symlink v0.2.0
null
2021-11-01 13:43:16+00:00
2021-11-19 12:23:51+00:00
integration/container/kill_test.go
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestKillContainerInvalidSignal(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, "0") assert.Error(t, err, "Error response from daemon: Invalid signal: 0") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIG42") assert.Error(t, err, "Error response from daemon: Invalid signal: SIG42") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) } func TestKillContainer(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string signal string status string skipOs string }{ { doc: "no signal", signal: "", status: "exited", skipOs: "", }, { doc: "non killing signal", signal: "SIGWINCH", status: "running", skipOs: "windows", }, { doc: "killing signal", signal: "SIGTERM", status: "exited", skipOs: "", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { skip.If(t, testEnv.OSType == tc.skipOs, "Windows does not support SIGWINCH") ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, tc.signal) assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string stopsignal string status string }{ { doc: "same-signal-disables-restart-policy", stopsignal: "TERM", status: "exited", }, { doc: "different-signal-keep-restart-policy", stopsignal: "CONT", status: "running", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { ctx := context.Background() id := container.Run(ctx, t, client, container.WithRestartPolicy("always"), func(c *container.TestContainerConfig) { c.Config.StopSignal = tc.stopsignal }) err := client.ContainerKill(ctx, id, "TERM") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillStoppedContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.Assert(t, is.ErrorContains(err, "")) assert.Assert(t, is.Contains(err.Error(), "is not running")) } func TestKillStoppedContainerAPIPre120(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) } func TestKillDifferentUserContainer(t *testing.T) { // TODO Windows: Windows does not yet support -u (Feb 2016). 
skip.If(t, testEnv.OSType == "windows", "User containers (container.Config.User) are not yet supported on %q platform", testEnv.OSType) defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { c.Config.User = "daemon" }) poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) } func TestInspectOomKilledTrue(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") skip.If(t, testEnv.DaemonInfo.CgroupDriver == "none") skip.If(t, !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) skip.If(t, testEnv.DaemonInfo.CgroupVersion == "2", "FIXME: flaky on cgroup v2 (https://github.com/moby/moby/issues/41929)") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { c.HostConfig.Resources.Memory = 32 * 1024 * 1024 }) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) } func TestInspectOomKilledFalse(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) }
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestKillContainerInvalidSignal(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, "0") assert.ErrorContains(t, err, "Error response from daemon:") assert.ErrorContains(t, err, "nvalid signal: 0") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons. poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIG42") assert.ErrorContains(t, err, "Error response from daemon:") assert.ErrorContains(t, err, "nvalid signal: SIG42") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons. poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) } func TestKillContainer(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string signal string status string skipOs string }{ { doc: "no signal", signal: "", status: "exited", skipOs: "", }, { doc: "non killing signal", signal: "SIGWINCH", status: "running", skipOs: "windows", }, { doc: "killing signal", signal: "SIGTERM", status: "exited", skipOs: "", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { skip.If(t, testEnv.OSType == tc.skipOs, "Windows does not support SIGWINCH") ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, tc.signal) assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string stopsignal string status string }{ { doc: "same-signal-disables-restart-policy", stopsignal: "TERM", status: "exited", }, { doc: "different-signal-keep-restart-policy", stopsignal: "CONT", status: "running", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { ctx := context.Background() id := container.Run(ctx, t, client, container.WithRestartPolicy("always"), func(c *container.TestContainerConfig) { c.Config.StopSignal = tc.stopsignal }) err := client.ContainerKill(ctx, id, "TERM") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillStoppedContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.Assert(t, is.ErrorContains(err, "")) assert.Assert(t, is.Contains(err.Error(), "is not running")) } func TestKillStoppedContainerAPIPre120(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Create(ctx, t, client) 
err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) } func TestKillDifferentUserContainer(t *testing.T) { // TODO Windows: Windows does not yet support -u (Feb 2016). skip.If(t, testEnv.OSType == "windows", "User containers (container.Config.User) are not yet supported on %q platform", testEnv.OSType) defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { c.Config.User = "daemon" }) poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) } func TestInspectOomKilledTrue(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") skip.If(t, testEnv.DaemonInfo.CgroupDriver == "none") skip.If(t, !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) skip.If(t, testEnv.DaemonInfo.CgroupVersion == "2", "FIXME: flaky on cgroup v2 (https://github.com/moby/moby/issues/41929)") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { c.HostConfig.Resources.Memory = 32 * 1024 * 1024 }) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) } func TestInspectOomKilledFalse(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) }
thaJeztah
4fafb27799145fa2f3b58503452a2f33864b7ab9
f06314fa842abdb76eafa0f3d204b02cc1290b0c
ah, so this was a little hack; the update has some linting fixes, one of them is "errors should not be capitalised or contain punctuation", so it changed from `Invalid signal` to `invalid signal`. Tried to keep the test "portable" without doing a `strings.ToLower()`. Probably not strictly needed (only in case we wanted to run the integration tests against different daemon versions, but I guess we won't be doing that).
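A tiny standalone sketch (hypothetical, not from the test file) of why matching on the shared suffix `nvalid signal` keeps the assertion working against both the old capitalised and the new lower-case daemon error, without lower-casing:

```go
// The suffix "nvalid signal: 0" is common to both spellings of the daemon error,
// so an ErrorContains-style substring check passes against either daemon version.
package main

import (
	"fmt"
	"strings"
)

func main() {
	oldErr := "Error response from daemon: Invalid signal: 0" // pre-linting daemon
	newErr := "Error response from daemon: invalid signal: 0" // post-linting daemon

	fmt.Println(strings.Contains(oldErr, "nvalid signal: 0")) // true
	fmt.Println(strings.Contains(newErr, "nvalid signal: 0")) // true
}
```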
thaJeztah
4,400
moby/moby
42,980
vendor: github.com/moby/sys/mount v0.3.0, mountinfo v0.5.0, signal v0.6.0, symlink v0.2.0
### vendor: golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 full diff: https://github.com/golang/sys/compare/63515b42dcdf9544f4e6a02fd7632793fde2f72d...69cdffdb9359ff97d91e4f4fbb6b2714c3898eae ### vendor: github.com/moby/sys/mount v0.3.0, mountinfo v0.5.0, signal v0.6.0, symlink v0.2.0 full diff: https://github.com/moby/sys/compare/signal/v0.5.0...signal/v0.6.0 Modules: - github.com/moby/sys/mount v0.3.0 - github.com/moby/sys/mountinfo v0.5.0 - github.com/moby/sys/signal v0.6.0 - github.com/moby/sys/symlink v0.2.0
null
2021-11-01 13:43:16+00:00
2021-11-19 12:23:51+00:00
integration/container/kill_test.go
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestKillContainerInvalidSignal(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, "0") assert.Error(t, err, "Error response from daemon: Invalid signal: 0") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIG42") assert.Error(t, err, "Error response from daemon: Invalid signal: SIG42") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) } func TestKillContainer(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string signal string status string skipOs string }{ { doc: "no signal", signal: "", status: "exited", skipOs: "", }, { doc: "non killing signal", signal: "SIGWINCH", status: "running", skipOs: "windows", }, { doc: "killing signal", signal: "SIGTERM", status: "exited", skipOs: "", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { skip.If(t, testEnv.OSType == tc.skipOs, "Windows does not support SIGWINCH") ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, tc.signal) assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string stopsignal string status string }{ { doc: "same-signal-disables-restart-policy", stopsignal: "TERM", status: "exited", }, { doc: "different-signal-keep-restart-policy", stopsignal: "CONT", status: "running", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { ctx := context.Background() id := container.Run(ctx, t, client, container.WithRestartPolicy("always"), func(c *container.TestContainerConfig) { c.Config.StopSignal = tc.stopsignal }) err := client.ContainerKill(ctx, id, "TERM") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillStoppedContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.Assert(t, is.ErrorContains(err, "")) assert.Assert(t, is.Contains(err.Error(), "is not running")) } func TestKillStoppedContainerAPIPre120(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) } func TestKillDifferentUserContainer(t *testing.T) { // TODO Windows: Windows does not yet support -u (Feb 2016). 
skip.If(t, testEnv.OSType == "windows", "User containers (container.Config.User) are not yet supported on %q platform", testEnv.OSType) defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { c.Config.User = "daemon" }) poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) } func TestInspectOomKilledTrue(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") skip.If(t, testEnv.DaemonInfo.CgroupDriver == "none") skip.If(t, !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) skip.If(t, testEnv.DaemonInfo.CgroupVersion == "2", "FIXME: flaky on cgroup v2 (https://github.com/moby/moby/issues/41929)") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { c.HostConfig.Resources.Memory = 32 * 1024 * 1024 }) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) } func TestInspectOomKilledFalse(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) }
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestKillContainerInvalidSignal(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, "0") assert.ErrorContains(t, err, "Error response from daemon:") assert.ErrorContains(t, err, "nvalid signal: 0") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons. poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIG42") assert.ErrorContains(t, err, "Error response from daemon:") assert.ErrorContains(t, err, "nvalid signal: SIG42") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons. poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) } func TestKillContainer(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string signal string status string skipOs string }{ { doc: "no signal", signal: "", status: "exited", skipOs: "", }, { doc: "non killing signal", signal: "SIGWINCH", status: "running", skipOs: "windows", }, { doc: "killing signal", signal: "SIGTERM", status: "exited", skipOs: "", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { skip.If(t, testEnv.OSType == tc.skipOs, "Windows does not support SIGWINCH") ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, tc.signal) assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string stopsignal string status string }{ { doc: "same-signal-disables-restart-policy", stopsignal: "TERM", status: "exited", }, { doc: "different-signal-keep-restart-policy", stopsignal: "CONT", status: "running", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { ctx := context.Background() id := container.Run(ctx, t, client, container.WithRestartPolicy("always"), func(c *container.TestContainerConfig) { c.Config.StopSignal = tc.stopsignal }) err := client.ContainerKill(ctx, id, "TERM") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillStoppedContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.Assert(t, is.ErrorContains(err, "")) assert.Assert(t, is.Contains(err.Error(), "is not running")) } func TestKillStoppedContainerAPIPre120(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Create(ctx, t, client) 
err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) } func TestKillDifferentUserContainer(t *testing.T) { // TODO Windows: Windows does not yet support -u (Feb 2016). skip.If(t, testEnv.OSType == "windows", "User containers (container.Config.User) are not yet supported on %q platform", testEnv.OSType) defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { c.Config.User = "daemon" }) poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) } func TestInspectOomKilledTrue(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") skip.If(t, testEnv.DaemonInfo.CgroupDriver == "none") skip.If(t, !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) skip.If(t, testEnv.DaemonInfo.CgroupVersion == "2", "FIXME: flaky on cgroup v2 (https://github.com/moby/moby/issues/41929)") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { c.HostConfig.Resources.Memory = 32 * 1024 * 1024 }) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) } func TestInspectOomKilledFalse(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) }
thaJeztah
4fafb27799145fa2f3b58503452a2f33864b7ab9
f06314fa842abdb76eafa0f3d204b02cc1290b0c
That'd be worth a comment if you're inclined to make a minor fix here.
samuelkarp
4,401
moby/moby
42,980
vendor: github.com/moby/sys/mount v0.3.0, mountinfo v0.5.0, signal v0.6.0, symlink v0.2.0
### vendor: golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359

full diff: https://github.com/golang/sys/compare/63515b42dcdf9544f4e6a02fd7632793fde2f72d...69cdffdb9359ff97d91e4f4fbb6b2714c3898eae

### vendor: github.com/moby/sys/mount v0.3.0, mountinfo v0.5.0, signal v0.6.0, symlink v0.2.0

full diff: https://github.com/moby/sys/compare/signal/v0.5.0...signal/v0.6.0

Modules:

- github.com/moby/sys/mount v0.3.0
- github.com/moby/sys/mountinfo v0.5.0
- github.com/moby/sys/signal v0.6.0
- github.com/moby/sys/symlink v0.2.0
null
2021-11-01 13:43:16+00:00
2021-11-19 12:23:51+00:00
integration/container/kill_test.go
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestKillContainerInvalidSignal(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, "0") assert.Error(t, err, "Error response from daemon: Invalid signal: 0") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIG42") assert.Error(t, err, "Error response from daemon: Invalid signal: SIG42") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) } func TestKillContainer(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string signal string status string skipOs string }{ { doc: "no signal", signal: "", status: "exited", skipOs: "", }, { doc: "non killing signal", signal: "SIGWINCH", status: "running", skipOs: "windows", }, { doc: "killing signal", signal: "SIGTERM", status: "exited", skipOs: "", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { skip.If(t, testEnv.OSType == tc.skipOs, "Windows does not support SIGWINCH") ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, tc.signal) assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string stopsignal string status string }{ { doc: "same-signal-disables-restart-policy", stopsignal: "TERM", status: "exited", }, { doc: "different-signal-keep-restart-policy", stopsignal: "CONT", status: "running", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { ctx := context.Background() id := container.Run(ctx, t, client, container.WithRestartPolicy("always"), func(c *container.TestContainerConfig) { c.Config.StopSignal = tc.stopsignal }) err := client.ContainerKill(ctx, id, "TERM") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillStoppedContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.Assert(t, is.ErrorContains(err, "")) assert.Assert(t, is.Contains(err.Error(), "is not running")) } func TestKillStoppedContainerAPIPre120(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) } func TestKillDifferentUserContainer(t *testing.T) { // TODO Windows: Windows does not yet support -u (Feb 2016). 
skip.If(t, testEnv.OSType == "windows", "User containers (container.Config.User) are not yet supported on %q platform", testEnv.OSType) defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { c.Config.User = "daemon" }) poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) } func TestInspectOomKilledTrue(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") skip.If(t, testEnv.DaemonInfo.CgroupDriver == "none") skip.If(t, !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) skip.If(t, testEnv.DaemonInfo.CgroupVersion == "2", "FIXME: flaky on cgroup v2 (https://github.com/moby/moby/issues/41929)") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { c.HostConfig.Resources.Memory = 32 * 1024 * 1024 }) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) } func TestInspectOomKilledFalse(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) }
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestKillContainerInvalidSignal(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, "0") assert.ErrorContains(t, err, "Error response from daemon:") assert.ErrorContains(t, err, "nvalid signal: 0") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons. poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIG42") assert.ErrorContains(t, err, "Error response from daemon:") assert.ErrorContains(t, err, "nvalid signal: SIG42") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons. poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) } func TestKillContainer(t *testing.T) { defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string signal string status string skipOs string }{ { doc: "no signal", signal: "", status: "exited", skipOs: "", }, { doc: "non killing signal", signal: "SIGWINCH", status: "running", skipOs: "windows", }, { doc: "killing signal", signal: "SIGTERM", status: "exited", skipOs: "", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { skip.If(t, testEnv.OSType == tc.skipOs, "Windows does not support SIGWINCH") ctx := context.Background() id := container.Run(ctx, t, client) err := client.ContainerKill(ctx, id, tc.signal) assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() client := testEnv.APIClient() testCases := []struct { doc string stopsignal string status string }{ { doc: "same-signal-disables-restart-policy", stopsignal: "TERM", status: "exited", }, { doc: "different-signal-keep-restart-policy", stopsignal: "CONT", status: "running", }, } for _, tc := range testCases { tc := tc t.Run(tc.doc, func(t *testing.T) { ctx := context.Background() id := container.Run(ctx, t, client, container.WithRestartPolicy("always"), func(c *container.TestContainerConfig) { c.Config.StopSignal = tc.stopsignal }) err := client.ContainerKill(ctx, id, "TERM") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) } } func TestKillStoppedContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Create(ctx, t, client) err := client.ContainerKill(ctx, id, "SIGKILL") assert.Assert(t, is.ErrorContains(err, "")) assert.Assert(t, is.Contains(err.Error(), "is not running")) } func TestKillStoppedContainerAPIPre120(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports 1.25 or later") defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Create(ctx, t, client) 
err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) } func TestKillDifferentUserContainer(t *testing.T) { // TODO Windows: Windows does not yet support -u (Feb 2016). skip.If(t, testEnv.OSType == "windows", "User containers (container.Config.User) are not yet supported on %q platform", testEnv.OSType) defer setupTest(t)() ctx := context.Background() client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { c.Config.User = "daemon" }) poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGKILL") assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) } func TestInspectOomKilledTrue(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") skip.If(t, testEnv.DaemonInfo.CgroupDriver == "none") skip.If(t, !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) skip.If(t, testEnv.DaemonInfo.CgroupVersion == "2", "FIXME: flaky on cgroup v2 (https://github.com/moby/moby/issues/41929)") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { c.HostConfig.Resources.Memory = 32 * 1024 * 1024 }) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) } func TestInspectOomKilledFalse(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) assert.NilError(t, err) assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) }
thaJeztah
4fafb27799145fa2f3b58503452a2f33864b7ab9
f06314fa842abdb76eafa0f3d204b02cc1290b0c
I rebased, and added a comment 👍
thaJeztah
4,402
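The two kill_test.go records above both revolve around swapping exact-message assertions for substring assertions. As a rough illustration of that difference (this test is not part of the repository; the package name, test name, and error string are invented for the example), a self-contained gotest.tools sketch might look like:

```go
package example

import (
	"errors"
	"testing"

	"gotest.tools/v3/assert"
)

// TestSignalErrorMatching contrasts the two assertion styles seen in the diff
// above: assert.Error demands an exact error message, while
// assert.ErrorContains only requires a substring, so it keeps passing when a
// daemon reports "Invalid signal" with a capital "I" instead of "invalid".
func TestSignalErrorMatching(t *testing.T) {
	err := errors.New("Error response from daemon: invalid signal: 0")

	// Exact-match assertion: breaks if the daemon's capitalisation changes.
	assert.Error(t, err, "Error response from daemon: invalid signal: 0")

	// Substring assertions: match "(I|i)nvalid signal" regardless of the case
	// of the first letter, which is what the updated kill_test.go relies on.
	assert.ErrorContains(t, err, "Error response from daemon:")
	assert.ErrorContains(t, err, "nvalid signal: 0")
}
```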
moby/moby
42,979
vendor: github.com/fluent/fluent-logger-golang v1.8.0
Fixes #40063.

**- What I did**

Update the fluent logger library to v1.8.0. The following commits/PRs were merged since the last bump:

* [Add callback for error handling when using async](https://github.com/fluent/fluent-logger-golang/pull/97)
* [Fix panic when accessing unexported struct field](https://github.com/fluent/fluent-logger-golang/pull/99)
* [Properly stop logger during (re)connect failure](https://github.com/fluent/fluent-logger-golang/pull/82)
* [Support a TLS-enabled connection](https://github.com/fluent/fluent-logger-golang/pull/107)
* [Do not allow writing events after fluent.Close()](https://github.com/fluent/fluent-logger-golang/pull/105)

**- How to verify it**

Before this change, running the following commands would lead to docker hanging on the `kill` command:

```console
$ docker run --rm -it --name test --log-driver=fluentd --log-opt fluentd-address=fluentdhost:24224 --log-opt fluentd-async=true debian /bin/bash -c 'echo test; sleep infinity;'

# In another terminal:
$ docker kill test
```

**- Description for the changelog**

- Forcefully stop the fluentd logger when a container stops and the logger is running in async mode.
null
2021-11-01 10:12:54+00:00
2021-12-02 19:51:04+00:00
daemon/logger/fluentd/fluentd.go
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, ForceStopAsyncSend: async || asyncConnect, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
akerouanton
0f92cf20442799364539cd1a121339a0395b2b31
f6848ae321b69efb0c1a0304a394e1e3f141b1e1
This option should probably be prefixed with `fluentd-` (as it's specific to this driver).
thaJeztah
4,403
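The fluentd records above and below all discuss the new `ForceStopAsyncSend` field added to `fluent.Config`. As a hedged, standalone sketch of the library-level behaviour (the address, tag, and payload are invented; only the config fields and the `New`, `PostWithTime`, and `Close` calls appear in the driver code shown in these records), enabling the option looks roughly like this:

```go
package main

import (
	"log"
	"time"

	"github.com/fluent/fluent-logger-golang/fluent"
)

func main() {
	// Sketch of the configuration the updated driver builds for an async
	// logger: ForceStopAsyncSend is enabled whenever async (or the deprecated
	// async-connect) mode is on, so Close() stops the background sender
	// instead of blocking on an unreachable fluentd endpoint.
	cfg := fluent.Config{
		FluentHost:         "127.0.0.1", // illustrative address
		FluentPort:         24224,
		FluentNetwork:      "tcp",
		Async:              true,
		ForceStopAsyncSend: true,
	}

	logger, err := fluent.New(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Post one event, mirroring how the driver calls PostWithTime.
	_ = logger.PostWithTime("docker.example", time.Now(), map[string]string{"log": "hello"})

	// With ForceStopAsyncSend set, Close returns promptly even if the
	// endpoint never became reachable.
	if err := logger.Close(); err != nil {
		log.Println("close:", err)
	}
}
```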
moby/moby
42,979
vendor: github.com/fluent/fluent-logger-golang v1.8.0
Fixes #40063.

**- What I did**

Update the fluent logger library to v1.8.0. The following commits/PRs were merged since the last bump:

* [Add callback for error handling when using async](https://github.com/fluent/fluent-logger-golang/pull/97)
* [Fix panic when accessing unexported struct field](https://github.com/fluent/fluent-logger-golang/pull/99)
* [Properly stop logger during (re)connect failure](https://github.com/fluent/fluent-logger-golang/pull/82)
* [Support a TLS-enabled connection](https://github.com/fluent/fluent-logger-golang/pull/107)
* [Do not allow writing events after fluent.Close()](https://github.com/fluent/fluent-logger-golang/pull/105)

**- How to verify it**

Before this change, running the following commands would lead to docker hanging on the `kill` command:

```console
$ docker run --rm -it --name test --log-driver=fluentd --log-opt fluentd-address=fluentdhost:24224 --log-opt fluentd-async=true debian /bin/bash -c 'echo test; sleep infinity;'

# In another terminal:
$ docker kill test
```

**- Description for the changelog**

- Forcefully stop the fluentd logger when a container stops and the logger is running in async mode.
null
2021-11-01 10:12:54+00:00
2021-12-02 19:51:04+00:00
daemon/logger/fluentd/fluentd.go
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, ForceStopAsyncSend: async || asyncConnect, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
akerouanton
0f92cf20442799364539cd1a121339a0395b2b31
f6848ae321b69efb0c1a0304a394e1e3f141b1e1
Slightly curious though; would there be reasons for someone to disable this? (wondering if it needs to be configurable, or if using the default would be what most/all users would want)
thaJeztah
4,404
moby/moby
42,979
vendor: github.com/fluent/fluent-logger-golang v1.8.0
Fixes #40063.

**- What I did**

Update the fluent logger library to v1.8.0. The following commits/PRs were merged since the last bump:

* [Add callback for error handling when using async](https://github.com/fluent/fluent-logger-golang/pull/97)
* [Fix panic when accessing unexported struct field](https://github.com/fluent/fluent-logger-golang/pull/99)
* [Properly stop logger during (re)connect failure](https://github.com/fluent/fluent-logger-golang/pull/82)
* [Support a TLS-enabled connection](https://github.com/fluent/fluent-logger-golang/pull/107)
* [Do not allow writing events after fluent.Close()](https://github.com/fluent/fluent-logger-golang/pull/105)

**- How to verify it**

Before this change, running the following commands would lead to docker hanging on the `kill` command:

```console
$ docker run --rm -it --name test --log-driver=fluentd --log-opt fluentd-address=fluentdhost:24224 --log-opt fluentd-async=true debian /bin/bash -c 'echo test; sleep infinity;'

# In another terminal:
$ docker kill test
```

**- Description for the changelog**

- Forcefully stop the fluentd logger when a container stops and the logger is running in async mode.
null
2021-11-01 10:12:54+00:00
2021-12-02 19:51:04+00:00
daemon/logger/fluentd/fluentd.go
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, ForceStopAsyncSend: async || asyncConnect, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
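The functional difference between the before and after content above is the `ForceStopAsyncSend` field, which is now set whenever `fluentd-async` or the deprecated `fluentd-async-connect` is enabled. Below is a minimal sketch of what that means for a caller of fluent-logger-golang; the host is a placeholder and the behaviour notes reflect the PR description rather than a verified trace:

```go
package main

import (
	"log"

	"github.com/fluent/fluent-logger-golang/fluent"
)

func main() {
	// Mirrors what parseConfig now produces when fluentd-async=true.
	logger, err := fluent.New(fluent.Config{
		FluentHost:         "fluentdhost", // placeholder endpoint
		FluentPort:         24224,
		Async:              true,
		ForceStopAsyncSend: true, // the field this PR starts setting for async mode
	})
	if err != nil {
		log.Fatal(err)
	}

	_ = logger.Post("docker.test", map[string]string{"log": "hello"})

	// With ForceStopAsyncSend set, Close is expected to return promptly even
	// if the endpoint is unreachable; without it, pending async sends can
	// keep the caller (here, the daemon during `docker kill`) blocked.
	if err := logger.Close(); err != nil {
		log.Println("close:", err)
	}
}
```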
akerouanton
0f92cf20442799364539cd1a121339a0395b2b31
f6848ae321b69efb0c1a0304a394e1e3f141b1e1
Oops, indeed I forgot the prefix. Since dockerd could hang when this flag is disabled and fluentd is down, I think it's not really safe to disable it (unless #35716 gets merged and timeout handling is improved in fluent-logger-golang). IMHO most people want to keep this flag enabled, but a few users might want to never lose logs (that's why I made it configurable). However, AFAIK dockerd doesn't log anything when container logs can't be transmitted to fluentd, although this could be fixed by leveraging fluent/fluent-logger-golang#97 (which is vendored in this PR). From your maintainer's point of view, is it ok if logs are lost when fluentd is down? Would you like to log the container logs that can't be transmitted?
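For context on the hang mentioned above, a daemon-side workaround would be to bound the blocking `Close()` with a timeout. The sketch below is a generic Go pattern, not something this PR (or moby) implements, and the helper name is made up:

```go
package example

import (
	"fmt"
	"io"
	"time"
)

// closeWithTimeout is a hypothetical helper: it runs c.Close() in a goroutine
// and stops waiting after the timeout, so a stuck fluentd connection cannot
// block the caller indefinitely. The goroutine leaks if Close never returns,
// which is one reason fixing the shutdown inside the library is preferable.
func closeWithTimeout(c io.Closer, timeout time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- c.Close() }()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return fmt.Errorf("close timed out after %s", timeout)
	}
}
```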
akerouanton
4,405
moby/moby
42,979
vendor: github.com/fluent/fluent-logger-golang v1.8.0
Fixes #40063. **- What I did** Update the fluent logger library to v1.8.0. The following commits/PRs were merged since the last bump: * [Add callback for error handling when using async](https://github.com/fluent/fluent-logger-golang/pull/97) * [Fix panic when accessing unexported struct field](https://github.com/fluent/fluent-logger-golang/pull/99) * [Properly stop logger during (re)connect failure](https://github.com/fluent/fluent-logger-golang/pull/82) * [Support a TLS-enabled connection](https://github.com/fluent/fluent-logger-golang/pull/107) * [Do not allow writing events after fluent.Close()](https://github.com/fluent/fluent-logger-golang/pull/105) **- How to verify it** Before this change, running the following commands would lead to docker hanging on the `kill` command: ```console $ docker run --rm -it --name test --log-driver=fluentd --log-opt fluentd-address=fluentdhost:24224 --log-opt fluentd-async=true debian /bin/bash -c 'echo test; sleep infinity;' # In another terminal: $ docker kill test ``` **- Description for the changelog** - Forcefully stop the fluentd logger when a container stops and the logger is running in async mode.
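For anyone reproducing this, the driver also parses options that bound the retry behaviour discussed later in the thread (see `parseConfig` in the file content). A hypothetical invocation with placeholder values:

```console
$ docker run --rm -it --name test \
    --log-driver=fluentd \
    --log-opt fluentd-address=fluentdhost:24224 \
    --log-opt fluentd-async=true \
    --log-opt fluentd-max-retries=10 \
    --log-opt fluentd-retry-wait=500ms \
    debian /bin/bash -c 'echo test; sleep infinity;'
```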
null
2021-11-01 10:12:54+00:00
2021-12-02 19:51:04+00:00
daemon/logger/fluentd/fluentd.go
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, ForceStopAsyncSend: async || asyncConnect, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
akerouanton
0f92cf20442799364539cd1a121339a0395b2b31
f6848ae321b69efb0c1a0304a394e1e3f141b1e1
Hey, I'm from the Fluent community... > is it ok if logs are lost when fluentd is down? Is there any other option? If Fluentd/bit is down, then the logs have nowhere to go. You wouldn't want to keep buffering them for too long. Discarding them seems like the right behavior. Is there a way for users to configure how long it will buffer and keep trying to re-connect before it starts just dropping logs? That seems ideal. > although this could be fixed by leveraging fluent/fluent-logger-golang#97 (which is vendored in this PR). What would the user experience look like if we used this?
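On the question of configuring how long the logger keeps retrying: `fluentd-retry-wait` and `fluentd-max-retries` already exist (see `parseConfig` above). The sketch below is only a back-of-the-envelope estimate of the resulting retry window, assuming the 1.5x exponential back-off hinted at in the `defaultMaxRetries` comment; the real library may cap individual waits, so this is not its actual behaviour:

```go
package example

import "time"

// totalRetryWindow estimates how long the logger keeps retrying before giving
// up, assuming each wait grows by a factor of 1.5 starting from retryWait.
// The growth factor is an assumption drawn from the defaultMaxRetries comment
// in the driver source, not verified library behaviour, and the arithmetic
// overflows for large maxRetries values; it is only meant for small inputs.
func totalRetryWindow(retryWait time.Duration, maxRetries int) time.Duration {
	total := time.Duration(0)
	wait := retryWait
	for i := 0; i < maxRetries; i++ {
		total += wait
		wait = wait * 3 / 2 // 1.5x exponential back-off
	}
	return total
}
```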
PettitWesley
4,406
moby/moby
42,979
vendor: github.com/fluent/fluent-logger-golang v1.8.0
Fixes #40063. **- What I did** Update the fluent logger library to v1.8.0. The following commits/PRs were merged since the last bump: * [Add callback for error handling when using async](https://github.com/fluent/fluent-logger-golang/pull/97) * [Fix panic when accessing unexported struct field](https://github.com/fluent/fluent-logger-golang/pull/99) * [Properly stop logger during (re)connect failure](https://github.com/fluent/fluent-logger-golang/pull/82) * [Support a TLS-enabled connection](https://github.com/fluent/fluent-logger-golang/pull/107) * [Do not allow writing events after fluent.Close()](https://github.com/fluent/fluent-logger-golang/pull/105) **- How to verify it** Before this change, running the following commands would lead to docker hanging on the `kill` command: ```console $ docker run --rm -it --name test --log-driver=fluentd --log-opt fluentd-address=fluentdhost:24224 --log-opt fluentd-async=true debian /bin/bash -c 'echo test; sleep infinity;' # In another terminal: $ docker kill test ``` **- Description for the changelog** - Forcefully stop the fluentd logger when a container stops and the logger is running in async mode.
null
2021-11-01 10:12:54+00:00
2021-12-02 19:51:04+00:00
daemon/logger/fluentd/fluentd.go
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, ForceStopAsyncSend: async || asyncConnect, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
akerouanton
0f92cf20442799364539cd1a121339a0395b2b31
f6848ae321b69efb0c1a0304a394e1e3f141b1e1
> > although this could be fixed by leveraging fluent/fluent-logger-golang#97 (which is vendored in this PR). > > What would the user experience look like if we used this? It'd send logs to journald, so it'd be possible to see them through `# journalctl -u docker.service`. > Is there any other option? If Fluentd/bit is down, then the logs have nowhere to go. You wouldn't want to keep buffering them for too long. Discarding them seems like the right behavior. No, logs have nowhere to go and, currently, because of how the exponential back-off retry algorithm is configured, it might block the daemon forever, and that's exactly what this PR tries to avoid. > Is there a way for users to configure how long it will buffer and keep trying to re-connect before it starts just dropping logs? That seems ideal. Short story: yes, but it's not implemented in Docker, and no, as it's probably broken in sync mode. Long story: Docker's fluentd integration doesn't provide a way to set the library's `WriteTimeout` parameter. Anyway, I believe the `WriteTimeout` mechanism is broken in sync mode under some circumstances: e.g. trying to write a new log will change the write deadline of older logs, so under some relatively "high" load, deadlines will be pushed back forever such that no deadline is ever reached. To be more explicit, if the write timeout is set to 2 minutes and a new log is always written less than 2 minutes after the previous one, then no log will ever time out. See [this comment](https://github.com/fluent/fluent-logger-golang/pull/82#issuecomment-949409791) (this needs proper testing and reporting). --- Seeing your comments, I believe the best option is to remove this flag. If logging to journald the logs that can't be sent to fluentd is a desirable feature, I should open a new issue/PR. But I'd prefer to receive some guidance from docker maintainers about that first (cc @thaJeztah).
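The deadline-pushing behaviour described above follows from how write deadlines work on a Go `net.Conn`: `SetWriteDeadline` applies to the whole connection, including a Write that is already blocked. A minimal generic illustration (this is plain Go, not the library's actual code):

```go
package example

import (
	"net"
	"time"
)

// writeWithDeadline resets the connection-wide write deadline before each
// write, which is how a per-message WriteTimeout is typically implemented.
// Calling it for a new message while an earlier Write is still blocked also
// pushes back the earlier Write's deadline, so under steady load no deadline
// is ever reached; that is the failure mode described in the comment above.
func writeWithDeadline(conn net.Conn, payload []byte, timeout time.Duration) error {
	if err := conn.SetWriteDeadline(time.Now().Add(timeout)); err != nil {
		return err
	}
	_, err := conn.Write(payload)
	return err
}
```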
akerouanton
4,407
moby/moby
42,979
vendor: github.com/fluent/fluent-logger-golang v1.8.0
Fixes #40063. **- What I did** Update the fluent logger library to v1.8.0. The following commits/PRs were merged since the last bump: * [Add callback for error handling when using async](https://github.com/fluent/fluent-logger-golang/pull/97) * [Fix panic when accessing unexported struct field](https://github.com/fluent/fluent-logger-golang/pull/99) * [Properly stop logger during (re)connect failure](https://github.com/fluent/fluent-logger-golang/pull/82) * [Support a TLS-enabled connection](https://github.com/fluent/fluent-logger-golang/pull/107) * [Do not allow writing events after fluent.Close()](https://github.com/fluent/fluent-logger-golang/pull/105) **- How to verify it** Before this change, running the following commands would lead to docker hanging on the `kill` command: ```console $ docker run --rm -it --name test --log-driver=fluentd --log-opt fluentd-address=fluentdhost:24224 --log-opt fluentd-async=true debian /bin/bash -c 'echo test; sleep infinity;' # In another terminal: $ docker kill test ``` **- Description for the changelog** - Forcefully stop the fluentd logger when a container stops and the logger is running in async mode.
null
2021-11-01 10:12:54+00:00
2021-12-02 19:51:04+00:00
daemon/logger/fluentd/fluentd.go
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, ForceStopAsyncSend: async || asyncConnect, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
akerouanton
0f92cf20442799364539cd1a121339a0395b2b31
f6848ae321b69efb0c1a0304a394e1e3f141b1e1
> Seeing your comments, I believe the best option is to remove this flag. If logging to journald the logs that can't be sent to fluentd is a desirable feature, I should open a new issue/PR. But I'd prefer to receive some guidance from docker maintainers about that first I agree with this. Sounds like no one would want the current option. I am curious whether some sort of configurable maximum number of retry attempts is possible? If not, an option to dump logs to the Docker journald log stream sounds useful (default false, though). I think in practice very few of my users would use that, since they probably have a ton of container logs and wouldn't want to risk flooding the Docker logs with that volume. If outputting to an optional file or even falling back to the json-file log driver were possible... I think that would be an ideal user experience. But I would not put any great urgency on implementing that. Fixing the current bug here is the most critical part.
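On the "fall back to another driver" idea: the `logger.Logger` interface implemented by the fluentd struct above (Log/Name/Close) would in principle allow composing two drivers. The sketch below is purely hypothetical, not an existing or proposed moby feature, and it ignores message pooling (real drivers recycle messages via `logger.PutMessage`, so a workable version would need to copy the message before handing it to the fallback):

```go
package example

import "github.com/docker/docker/daemon/logger"

// fallbackLogger is a hypothetical composition of two drivers: messages go to
// the primary driver and, only when that returns an error, to a secondary
// driver (e.g. json-file). Message pooling and partial-log metadata are
// ignored, so this is an illustration rather than a working driver.
type fallbackLogger struct {
	primary, secondary logger.Logger
}

func (l *fallbackLogger) Log(msg *logger.Message) error {
	if err := l.primary.Log(msg); err != nil {
		return l.secondary.Log(msg)
	}
	return nil
}

func (l *fallbackLogger) Name() string { return l.primary.Name() }

func (l *fallbackLogger) Close() error {
	err := l.primary.Close()
	if cerr := l.secondary.Close(); err == nil {
		err = cerr
	}
	return err
}
```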
PettitWesley
4,408
moby/moby
42,979
vendor: github.com/fluent/fluent-logger-golang v1.8.0
Fixes #40063. **- What I did** Update the fluent logger library to v1.8.0. The following commits/PRs were merged since the last bump: * [Add callback for error handling when using async](https://github.com/fluent/fluent-logger-golang/pull/97) * [Fix panic when accessing unexported struct field](https://github.com/fluent/fluent-logger-golang/pull/99) * [Properly stop logger during (re)connect failure](https://github.com/fluent/fluent-logger-golang/pull/82) * [Support a TLS-enabled connection](https://github.com/fluent/fluent-logger-golang/pull/107) * [Do not allow writing events after fluent.Close()](https://github.com/fluent/fluent-logger-golang/pull/105) **- How to verify it** Before this change, running the following commands would lead to docker hanging on the `kill` command: ```console $ docker run --rm -it --name test --log-driver=fluentd --log-opt fluentd-address=fluentdhost:24224 --log-opt fluentd-async=true debian /bin/bash -c 'echo test; sleep infinity;' # In another terminal: $ docker kill test ``` **- Description for the changelog** - Forcefully stop the fluentd logger when a container stops and the logger is running in async mode.
null
2021-11-01 10:12:54+00:00
2021-12-02 19:51:04+00:00
daemon/logger/fluentd/fluentd.go
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "math" "net" "net/url" "strconv" "strings" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/urlutil" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } type location struct { protocol string host string port int path string } const ( name = "fluentd" defaultBufferLimit = 1024 * 1024 defaultHost = "127.0.0.1" defaultPort = 24224 defaultProtocol = "tcp" // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] defaultMaxRetries = math.MaxInt32 defaultRetryWait = 1000 addressKey = "fluentd-address" asyncKey = "fluentd-async" asyncConnectKey = "fluentd-async-connect" // deprecated option (use fluent-async instead) bufferLimitKey = "fluentd-buffer-limit" maxRetriesKey = "fluentd-max-retries" requestAckKey = "fluentd-request-ack" retryWaitKey = "fluentd-retry-wait" subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. func New(info logger.Info) (logger.Logger, error) { fluentConfig, err := parseConfig(info.Config) if err != nil { return nil, errdefs.InvalidParameter(err) } tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, errdefs.InvalidParameter(err) } extra, err := info.ExtraAttributes(nil) if err != nil { return nil, errdefs.InvalidParameter(err) } logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: info.ContainerID, containerName: info.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } if msg.PLogMetaData != nil { data["partial_message"] = "true" data["partial_id"] = msg.PLogMetaData.ID data["partial_ordinal"] = strconv.Itoa(msg.PLogMetaData.Ordinal) data["partial_last"] = strconv.FormatBool(msg.PLogMetaData.Last) } ts := msg.Timestamp logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log option fluentd-address. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": case "env-regex": case "labels": case "labels-regex": case "tag": case addressKey: case asyncKey: case asyncConnectKey: case bufferLimitKey: case maxRetriesKey: case requestAckKey: case retryWaitKey: case subSecondPrecisionKey: // Accepted default: return errors.Errorf("unknown log opt '%s' for fluentd log driver", key) } } _, err := parseConfig(cfg) return err } func parseConfig(cfg map[string]string) (fluent.Config, error) { var config fluent.Config loc, err := parseAddress(cfg[addressKey]) if err != nil { return config, err } bufferLimit := defaultBufferLimit if cfg[bufferLimitKey] != "" { bl64, err := units.RAMInBytes(cfg[bufferLimitKey]) if err != nil { return config, err } bufferLimit = int(bl64) } retryWait := defaultRetryWait if cfg[retryWaitKey] != "" { rwd, err := time.ParseDuration(cfg[retryWaitKey]) if err != nil { return config, err } retryWait = int(rwd.Seconds() * 1000) } maxRetries := defaultMaxRetries if cfg[maxRetriesKey] != "" { mr64, err := strconv.ParseUint(cfg[maxRetriesKey], 10, strconv.IntSize) if err != nil { return config, err } maxRetries = int(mr64) } if cfg[asyncKey] != "" && cfg[asyncConnectKey] != "" { return config, errors.Errorf("conflicting options: cannot specify both '%s' and '%s", asyncKey, asyncConnectKey) } async := false if cfg[asyncKey] != "" { if async, err = strconv.ParseBool(cfg[asyncKey]); err != nil { return config, err } } // TODO fluentd-async-connect is deprecated in driver v1.4.0. Remove after two stable releases asyncConnect := false if cfg[asyncConnectKey] != "" { if asyncConnect, err = strconv.ParseBool(cfg[asyncConnectKey]); err != nil { return config, err } } subSecondPrecision := false if cfg[subSecondPrecisionKey] != "" { if subSecondPrecision, err = strconv.ParseBool(cfg[subSecondPrecisionKey]); err != nil { return config, err } } requestAck := false if cfg[requestAckKey] != "" { if requestAck, err = strconv.ParseBool(cfg[requestAckKey]); err != nil { return config, err } } config = fluent.Config{ FluentPort: loc.port, FluentHost: loc.host, FluentNetwork: loc.protocol, FluentSocketPath: loc.path, BufferLimit: bufferLimit, RetryWait: retryWait, MaxRetry: maxRetries, Async: async, AsyncConnect: asyncConnect, SubSecondPrecision: subSecondPrecision, RequestAck: requestAck, ForceStopAsyncSend: async || asyncConnect, } return config, nil } func parseAddress(address string) (*location, error) { if address == "" { return &location{ protocol: defaultProtocol, host: defaultHost, port: defaultPort, path: "", }, nil } protocol := defaultProtocol givenAddress := address if urlutil.IsTransportURL(address) { url, err := url.Parse(address) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } // unix and unixgram socket if url.Scheme == "unix" || url.Scheme == "unixgram" { return &location{ protocol: url.Scheme, host: "", port: 0, path: url.Path, }, nil } // tcp|udp protocol = url.Scheme address = url.Host } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: defaultPort, path: "", }, nil } portnum, err := strconv.Atoi(port) if err != nil { return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) } return &location{ protocol: protocol, host: host, port: portnum, path: "", }, nil }
akerouanton
0f92cf20442799364539cd1a121339a0395b2b31
f6848ae321b69efb0c1a0304a394e1e3f141b1e1
**The following comment is kind of long and rambly, so I want to be clear that helping me understand is fully optional.** I really appreciate the work you are doing here and trust you as a maintainer/owner of this code. I had meant to read your code and try to deeply understand what it was actually doing, but was always busy with other work. I'm not a maintainer here and I don't have the power to block or approve this, so again: helping me understand is fully optional.

> Docker integration of fluentd doesn't provide a parameter to set lib's parameter WriteTimeout. Anyway, I believe the WriteTimeout mechanism is broken in sync mode under some circumstances: eg. trying to write a new log will change the write deadline of older logs, so under some relatively "high" load, deadlines will be pushed back forever such that no deadlines will be reached. To be more explicit, if the write timeout is set to 2 minutes and at least one log is written every <2m00, then no logs will time out. See this comment (this needs proper testing and reporting).

Sorry, I don't follow this and the linked comment. To me, it feels like if there's a single connection, and that connection blocks for a while, it should be possible to have a global timeout on that connection. Or is that not how it works? There's no single connection, and each batch of logs is sent by a unique goroutine that can't safely have context on what the others are seeing? And you need to tell Docker to give up at some point, and there's no way to coordinate around that? There's no way to have a "max async connection retries"? Are the only possible solutions to give up quickly, or to risk being blocked forever?

The "Fluent" tool that I maintain is "Fluent Bit", which is a low-level C program that has multi-threading too. At a high level, this is how it works:

Log Producer => Fluent Bit (thing that has multiple threads accepting logs, and multiple threads outputting logs) => destination

Basically, on the input side you can have timeouts on connections, and on the output side you can have a configurable max retries, so that even though each thread has no global context, it won't retry its data more than N times. As far as shutting things down goes, you set it up so that you stop your producer and Fluent Bit together, and if something goes wrong, Fluent Bit can't block the log producer from being shut down.

In this case I think we have this diagram:

Container stdout/stderr => Fluentd log driver => Fluentd/Fluent Bit instance

I wonder if it's the same style of engineering problem, and thus whether the following should be possible:

1. Log driver issues can never stop the container from shutting down. Why does that happen in docker??
2. You handle the destination being down with a max retries per chunk/log event.
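To make the write-deadline concern quoted above concrete, here is a hypothetical, self-contained sketch (not code from the driver or the library): if the deadline on a shared connection is always set relative to the current write, a steady stream of new logs keeps pushing the effective deadline forward, so older pending data never times out.

```go
package main

import (
	"net"
	"time"
)

// writeWithTimeout resets the connection-wide write deadline relative to
// "now" before every write; under continuous load this deadline keeps
// moving, which is the behaviour the comment above is questioning.
func writeWithTimeout(conn net.Conn, msg []byte, timeout time.Duration) error {
	if err := conn.SetWriteDeadline(time.Now().Add(timeout)); err != nil {
		return err
	}
	_, err := conn.Write(msg)
	return err
}

func main() {
	// Usage (requires a reachable endpoint, shown here only as a comment):
	//   conn, _ := net.Dial("tcp", "fluentdhost:24224")
	//   _ = writeWithTimeout(conn, []byte("log line"), 2*time.Minute)
}
```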
PettitWesley
4,409
moby/moby
42,960
Fix TestCreateServiceSecretFileMode, TestCreateServiceConfigFileMode
hopefully fixes https://github.com/moby/moby/issues/37132

Looks like this test was broken from the start, and fully relied on a race condition. (Test was added in 65ee7fff02111bf696bc2fec442d07c2957f4151 / https://github.com/moby/moby/pull/36130)

The problem is in the service's command: `ls -l /etc/config || /bin/top`, which will either:

- exit immediately if the secret is mounted correctly at `/etc/config` (which it should), or
- keep running with `/bin/top` if the above failed

After the service is created, the test enters a race condition: it checks for 1 task to be running (which it occasionally is), after which it proceeds and looks up the list of tasks of the service, to get the log output of `ls -l /etc/config`. This is another race: first of all, the original filter for that task lookup did not filter by `running`, so it would pick "any" task of the service (either failed, running, or "completed" (successfully exited) tasks). In the meantime though, SwarmKit kept reconciling the service and creating new tasks, so even if the test was able to get the ID of the correct task, that task may already have exited and been removed (task-limit is 5 by default). Only if the test was "lucky" would it be able to get the logs, but of course, chances were that it would be "too late" and the task already gone.

The problem can be easily reproduced when running the steps manually:

    echo 'CONFIG' | docker config create myconfig -
    docker service create --config source=myconfig,target=/etc/config,mode=0777 --name myservice busybox sh -c 'ls -l /etc/config || /bin/top'

The above creates the service, but it keeps retrying, because each task exits immediately (followed by SwarmKit reconciling and starting a new task);

    mjntpfkkyuuc1dpay4h00c4oo
    overall progress: 0 out of 1 tasks
    1/1: ready     [======================================>            ]
    verify: Detected task failure
    ^COperation continuing in background.
    Use `docker service ps mjntpfkkyuuc1dpay4h00c4oo` to check progress.

And checking the tasks for the service reveals that tasks exit cleanly (no error), but _do exit_, so swarm just keeps on reconciling and spinning up new tasks;

    docker service ps myservice --no-trunc
    ID                          NAME            IMAGE                                                                                    NODE             DESIRED STATE   CURRENT STATE                     ERROR   PORTS
    2wmcuv4vffnet8nybg3he4v9n   myservice.1     busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Ready           Ready less than a second ago
    5p8b006uec125iq2892lxay64    \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Shutdown        Complete less than a second ago
    k8lpsvlak4b3nil0zfkexw61p    \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Shutdown        Complete 6 seconds ago
    vsunl5pi7e2n9ol3p89kvj6pn    \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Shutdown        Complete 11 seconds ago
    orxl8b6kt2l6dfznzzd4lij4s    \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Shutdown        Complete 17 seconds ago

This patch changes the service's command to `sleep`, so that a successful task (after successfully performing `ls -l /etc/config`) continues to be running until the service is deleted.
With that change, the service should (usually) reconcile immediately, which removes the race condition, and should also make it faster :)

This patch changes the tests to use client.ServiceLogs() instead of using the service's task list to directly access container logs. This should also fix some failures that happened if some tasks failed to start before reconciling, in which case client.TaskList() (with the current filters) could return more tasks than anticipated (as it also contained the exited tasks);

    === RUN   TestCreateServiceSecretFileMode
        create_test.go:291: assertion failed: 2 (int) != 1 (int)
    --- FAIL: TestCreateServiceSecretFileMode (7.88s)
    === RUN   TestCreateServiceConfigFileMode
        create_test.go:355: assertion failed: 2 (int) != 1 (int)
    --- FAIL: TestCreateServiceConfigFileMode (7.87s)
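For readers comparing the before/after content below, the patched tests read the output through the service-logs endpoint rather than chasing an individual task's container ID. A trimmed Go sketch of that pattern (assuming the moby client API used in the diff):

```go
package main

import (
	"context"
	"fmt"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// lastServiceLogLine reads the most recent stdout line for a service, the way
// the fixed tests do, instead of resolving an individual task's container ID.
func lastServiceLogLine(ctx context.Context, c client.APIClient, serviceID string) (string, error) {
	body, err := c.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{
		Tail:       "1",
		ShowStdout: true,
	})
	if err != nil {
		return "", err
	}
	defer body.Close()
	b, err := io.ReadAll(body)
	return string(b), err
}

func main() {
	fmt.Println("illustrative only; see integration/service/create_test.go in this record for real usage")
}
```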
null
2021-10-22 13:38:08+00:00
2021-10-27 10:29:24+00:00
integration/service/create_test.go
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config || /bin/top"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := 
io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. 
(thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. 
// get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config && sleep inf"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. 
// // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. (thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. 
service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
thaJeztah
9aa0b24dcfa8956e03644ec43cf09dadcd5b6223
aef8e4817251278524ed34a41cb456f5c7cf519b
This utility should probably also be updated to check the _actual_ state, but leaving that for a follow-up (similar to https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/integration/internal/swarm/states.go#L59-L70).
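To make the suggestion above concrete, here is a rough sketch of what a state-aware variant of that polling helper could look like, in the spirit of the states.go check linked above. The helper name (RunningTasksCountActual) and the choice to count only tasks observed in the running state are illustrative assumptions, not code from this PR or a planned follow-up.

    package swarm // hypothetical placement next to the existing test helpers

    import (
    	"context"

    	"github.com/docker/docker/api/types"
    	"github.com/docker/docker/api/types/filters"
    	swarmtypes "github.com/docker/docker/api/types/swarm"
    	"github.com/docker/docker/client"
    	"gotest.tools/v3/poll"
    )

    // RunningTasksCountActual is an illustrative, state-aware variant of the
    // running-tasks check: it counts only tasks whose observed state is
    // "running", rather than every task that exists for the service.
    func RunningTasksCountActual(client client.APIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
    	return func(log poll.LogT) poll.Result {
    		filter := filters.NewArgs()
    		filter.Add("service", serviceID)
    		tasks, err := client.TaskList(context.Background(), types.TaskListOptions{Filters: filter})
    		if err != nil {
    			return poll.Error(err)
    		}
    		var running uint64
    		for _, task := range tasks {
    			if task.Status.State == swarmtypes.TaskStateRunning {
    				running++
    			}
    		}
    		if running != instances {
    			return poll.Continue("%d out of %d tasks running", running, instances)
    		}
    		return poll.Success()
    	}
    }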
thaJeztah
4,410
moby/moby
42,960
Fix TestCreateServiceSecretFileMode, TestCreateServiceConfigFileMode
hopefully fixes https://github.com/moby/moby/issues/37132 Looks like this test was broken from the start, and fully relied on a race condition. (Test was added in 65ee7fff02111bf696bc2fec442d07c2957f4151 / https://github.com/moby/moby/pull/36130) The problem is in the service's command: `ls -l /etc/config || /bin/top`, which will either: - exit immediately if the secret is mounted correctly at `/etc/config` (which it should) - keep running with `/bin/top` if the above failed After the service is created, the test enters a race-condition, checking for 1 task to be running (which it ocassionally is), after which it proceeds, and looks up the list of tasks of the service, to get the log output of `ls -l /etc/config`. This is another race: first of all, the original filter for that task lookup did not filter by `running`, so it would pick "any" task of the service (either failed, running, or "completed" (successfully exited) tasks). In the meantime though, SwarmKit kept reconciling the service, and creating new tasks, so even if the test was able to get the ID of the correct task, that task may already have been exited, and removed (task-limit is 5 by default), so only if the test was "lucky", it would be able to get the logs, but of course, chances were likely that it would be "too late", and the task already gone. The problem can be easily reproduced when running the steps manually: echo 'CONFIG' | docker config create myconfig - docker service create --config source=myconfig,target=/etc/config,mode=0777 --name myservice busybox sh -c 'ls -l /etc/config || /bin/top' The above creates the service, but it keeps retrying, because each task exits immediately (followed by SwarmKit reconciling and starting a new task); mjntpfkkyuuc1dpay4h00c4oo overall progress: 0 out of 1 tasks 1/1: ready [======================================> ] verify: Detected task failure ^COperation continuing in background. Use `docker service ps mjntpfkkyuuc1dpay4h00c4oo` to check progress. And checking the tasks for the service reveals that tasks exit cleanly (no error), but _do exit_, so swarm just keeps up reconciling, and spinning up new tasks; docker service ps myservice --no-trunc ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS 2wmcuv4vffnet8nybg3he4v9n myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Ready Ready less than a second ago 5p8b006uec125iq2892lxay64 \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete less than a second ago k8lpsvlak4b3nil0zfkexw61p \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 6 seconds ago vsunl5pi7e2n9ol3p89kvj6pn \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 11 seconds ago orxl8b6kt2l6dfznzzd4lij4s \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 17 seconds ago This patch changes the service's command to `sleep`, so that a successful task (after successfully performing `ls -l /etc/config`) continues to be running until the service is deleted. 
With that change, the service should (usually) reconcile immediately, which removes the race condition, and should also make it faster :) This patch changes the tests to use client.ServiceLogs() instead of using the service's tasklist to directly access container logs. This should also fix some failures that happened if some tasks failed to start before reconciling, in which case client.TaskList() (with the current filters), could return more tasks than anticipated (as it also contained the exited tasks); === RUN TestCreateServiceSecretFileMode create_test.go:291: assertion failed: 2 (int) != 1 (int) --- FAIL: TestCreateServiceSecretFileMode (7.88s) === RUN TestCreateServiceConfigFileMode create_test.go:355: assertion failed: 2 (int) != 1 (int) --- FAIL: TestCreateServiceConfigFileMode (7.87s)
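For readers skimming this record, the shape of the fix in condensed form, lifted from the changed test below and trimmed to the relevant calls (the secret/config reference options and cleanup are elided; the secret variant uses /etc/secret instead of /etc/config):

    // The command now ends in `sleep inf`, so a task that mounted the file
    // successfully keeps running and SwarmKit has nothing to reconcile.
    serviceID := swarm.CreateService(t, d,
    	swarm.ServiceWithReplicas(1),
    	swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config && sleep inf"}),
    	// ... swarm.ServiceWithConfig(...) / swarm.ServiceWithSecret(...) as in the test ...
    )
    poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)

    // Logs are read through the service-logs endpoint, so the test no longer
    // has to look up a task and container ID that may already be gone.
    body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{
    	Tail:       "1",
    	ShowStdout: true,
    })
    assert.NilError(t, err)
    defer body.Close()

    content, err := io.ReadAll(body)
    assert.NilError(t, err)
    assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))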
null
2021-10-22 13:38:08+00:00
2021-10-27 10:29:24+00:00
integration/service/create_test.go
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config || /bin/top"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := 
io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. 
(thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. 
// get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config && sleep inf"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. 
// // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. (thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. 
service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
thaJeztah
9aa0b24dcfa8956e03644ec43cf09dadcd5b6223
aef8e4817251278524ed34a41cb456f5c7cf519b
FWIW, we don't really need these tasks to keep running (running once and printing the output would be sufficient), but keeping them running makes sure SwarmKit doesn't continue reconciling / spinning up new tasks. _Possibly_ we could simplify the test to get the results out in some different way, but I'm gonna leave that for a future exercise.
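Purely as speculation on the "different way" mentioned above, and explicitly not what this PR does: the test could let the task run once, wait for it to reach the complete state, and only then read the service logs. The sketch below assumes the service spec is also given a restart policy of "none"; otherwise swarm would keep replacing the exited task, which is exactly the race this PR avoids.

    // Hypothetical alternative, not part of the PR: wait for a one-shot task
    // to reach the "complete" state before reading logs. Assumes restart
    // policy "none" on the service spec so the exited task is not replaced.
    waitForComplete := func(log poll.LogT) poll.Result {
    	filter := filters.NewArgs()
    	filter.Add("service", serviceID)
    	tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter})
    	if err != nil {
    		return poll.Error(err)
    	}
    	for _, task := range tasks {
    		if task.Status.State == swarmtypes.TaskStateComplete {
    			return poll.Success()
    		}
    	}
    	return poll.Continue("no completed task for service %s yet", serviceID)
    }
    poll.WaitOn(t, waitForComplete)
    // After this, client.ServiceLogs can be read as in the current test.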
thaJeztah
4,411
moby/moby
42,960
Fix TestCreateServiceSecretFileMode, TestCreateServiceConfigFileMode
hopefully fixes https://github.com/moby/moby/issues/37132 Looks like this test was broken from the start, and fully relied on a race condition. (Test was added in 65ee7fff02111bf696bc2fec442d07c2957f4151 / https://github.com/moby/moby/pull/36130) The problem is in the service's command: `ls -l /etc/config || /bin/top`, which will either: - exit immediately if the secret is mounted correctly at `/etc/config` (which it should) - keep running with `/bin/top` if the above failed After the service is created, the test enters a race-condition, checking for 1 task to be running (which it ocassionally is), after which it proceeds, and looks up the list of tasks of the service, to get the log output of `ls -l /etc/config`. This is another race: first of all, the original filter for that task lookup did not filter by `running`, so it would pick "any" task of the service (either failed, running, or "completed" (successfully exited) tasks). In the meantime though, SwarmKit kept reconciling the service, and creating new tasks, so even if the test was able to get the ID of the correct task, that task may already have been exited, and removed (task-limit is 5 by default), so only if the test was "lucky", it would be able to get the logs, but of course, chances were likely that it would be "too late", and the task already gone. The problem can be easily reproduced when running the steps manually: echo 'CONFIG' | docker config create myconfig - docker service create --config source=myconfig,target=/etc/config,mode=0777 --name myservice busybox sh -c 'ls -l /etc/config || /bin/top' The above creates the service, but it keeps retrying, because each task exits immediately (followed by SwarmKit reconciling and starting a new task); mjntpfkkyuuc1dpay4h00c4oo overall progress: 0 out of 1 tasks 1/1: ready [======================================> ] verify: Detected task failure ^COperation continuing in background. Use `docker service ps mjntpfkkyuuc1dpay4h00c4oo` to check progress. And checking the tasks for the service reveals that tasks exit cleanly (no error), but _do exit_, so swarm just keeps up reconciling, and spinning up new tasks; docker service ps myservice --no-trunc ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS 2wmcuv4vffnet8nybg3he4v9n myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Ready Ready less than a second ago 5p8b006uec125iq2892lxay64 \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete less than a second ago k8lpsvlak4b3nil0zfkexw61p \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 6 seconds ago vsunl5pi7e2n9ol3p89kvj6pn \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 11 seconds ago orxl8b6kt2l6dfznzzd4lij4s \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 17 seconds ago This patch changes the service's command to `sleep`, so that a successful task (after successfully performing `ls -l /etc/config`) continues to be running until the service is deleted. 
With that change, the service should (usually) reconcile immediately, which removes the race condition, and should also make it faster :) This patch changes the tests to use client.ServiceLogs() instead of using the service's tasklist to directly access container logs. This should also fix some failures that happened if some tasks failed to start before reconciling, in which case client.TaskList() (with the current filters), could return more tasks than anticipated (as it also contained the exited tasks); === RUN TestCreateServiceSecretFileMode create_test.go:291: assertion failed: 2 (int) != 1 (int) --- FAIL: TestCreateServiceSecretFileMode (7.88s) === RUN TestCreateServiceConfigFileMode create_test.go:355: assertion failed: 2 (int) != 1 (int) --- FAIL: TestCreateServiceConfigFileMode (7.87s)
null
2021-10-22 13:38:08+00:00
2021-10-27 10:29:24+00:00
integration/service/create_test.go
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config || /bin/top"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := 
io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. 
(thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. 
// get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config && sleep inf"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. 
// // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. (thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. 
service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
thaJeztah
9aa0b24dcfa8956e03644ec43cf09dadcd5b6223
aef8e4817251278524ed34a41cb456f5c7cf519b
Actually, I think we can just use `client.ServiceLogs()` here; let me update
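The change the comment refers to ends up reading the last log line through the service logs endpoint instead of resolving a task's container first. A minimal fragment, mirroring the updated test code in this record's after_content (it assumes the surrounding test setup: `ctx`, `client`, `serviceID`, and the imports already present in the file):

```go
// Read the most recent log line for the service and check the file mode in it.
body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{
	Tail:       "1",
	ShowStdout: true,
})
assert.NilError(t, err)
defer body.Close()

content, err := io.ReadAll(body)
assert.NilError(t, err)
assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))
```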
thaJeztah
4,412
moby/moby
42,960
Fix TestCreateServiceSecretFileMode, TestCreateServiceConfigFileMode
hopefully fixes https://github.com/moby/moby/issues/37132

Looks like this test was broken from the start, and fully relied on a race condition. (Test was added in 65ee7fff02111bf696bc2fec442d07c2957f4151 / https://github.com/moby/moby/pull/36130)

The problem is in the service's command: `ls -l /etc/config || /bin/top`, which will either:

- exit immediately if the secret is mounted correctly at `/etc/config` (which it should)
- keep running with `/bin/top` if the above failed

After the service is created, the test enters a race condition: it checks for 1 task to be running (which it occasionally is), after which it proceeds and looks up the list of tasks of the service to get the log output of `ls -l /etc/config`. This is another race: first of all, the original filter for that task lookup did not filter by `running`, so it would pick "any" task of the service (failed, running, or "completed" (successfully exited) tasks). In the meantime, SwarmKit kept reconciling the service and creating new tasks, so even if the test was able to get the ID of the correct task, that task may already have exited and been removed (the task limit is 5 by default). Only if the test was "lucky" would it be able to get the logs; more likely it would be "too late", with the task already gone.

The problem can easily be reproduced by running the steps manually:

    echo 'CONFIG' | docker config create myconfig -
    docker service create --config source=myconfig,target=/etc/config,mode=0777 --name myservice busybox sh -c 'ls -l /etc/config || /bin/top'

The above creates the service, but it keeps retrying, because each task exits immediately (followed by SwarmKit reconciling and starting a new task);

    mjntpfkkyuuc1dpay4h00c4oo
    overall progress: 0 out of 1 tasks
    1/1: ready     [======================================>            ]
    verify: Detected task failure
    ^COperation continuing in background.
    Use `docker service ps mjntpfkkyuuc1dpay4h00c4oo` to check progress.

And checking the tasks for the service reveals that tasks exit cleanly (no error), but _do exit_, so swarm just keeps reconciling and spinning up new tasks;

    docker service ps myservice --no-trunc
    ID                          NAME             IMAGE                                                                                    NODE             DESIRED STATE   CURRENT STATE                     ERROR   PORTS
    2wmcuv4vffnet8nybg3he4v9n   myservice.1      busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Ready           Ready less than a second ago
    5p8b006uec125iq2892lxay64    \_ myservice.1  busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Shutdown        Complete less than a second ago
    k8lpsvlak4b3nil0zfkexw61p    \_ myservice.1  busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Shutdown        Complete 6 seconds ago
    vsunl5pi7e2n9ol3p89kvj6pn    \_ myservice.1  busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Shutdown        Complete 11 seconds ago
    orxl8b6kt2l6dfznzzd4lij4s    \_ myservice.1  busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57   docker-desktop   Shutdown        Complete 17 seconds ago

This patch changes the service's command to `sleep`, so that a successful task (after successfully performing `ls -l /etc/config`) continues running until the service is deleted.

With that change, the service should (usually) reconcile immediately, which removes the race condition, and should also make it faster :)

This patch also changes the tests to use client.ServiceLogs() instead of using the service's task list to access container logs directly. This should also fix failures that happened when some tasks failed to start before reconciling, in which case client.TaskList() (with the current filters) could return more tasks than anticipated (as it also contained the exited tasks);

    === RUN   TestCreateServiceSecretFileMode
        create_test.go:291: assertion failed: 2 (int) != 1 (int)
    --- FAIL: TestCreateServiceSecretFileMode (7.88s)
    === RUN   TestCreateServiceConfigFileMode
        create_test.go:355: assertion failed: 2 (int) != 1 (int)
    --- FAIL: TestCreateServiceConfigFileMode (7.87s)
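For reference, the heart of the fix in the test helper calls is the service command. A minimal before/after sketch, with both `swarm.ServiceWithCommand` lines taken from the before_content and after_content later in this record:

```go
// before: the command exits as soon as the listing succeeds, so the task
// completes and SwarmKit immediately schedules a replacement
swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}),

// after: a successful listing is followed by sleep, so the task keeps
// running and the service converges to a single long-running task
swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}),
```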
null
2021-10-22 13:38:08+00:00
2021-10-27 10:29:24+00:00
integration/service/create_test.go
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config || /bin/top"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := 
io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. 
(thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. 
// get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config && sleep inf"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. 
// // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. (thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. 
service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
thaJeztah
9aa0b24dcfa8956e03644ec43cf09dadcd5b6223
aef8e4817251278524ed34a41cb456f5c7cf519b
1 line should be enough 😅 (famous last words)
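The review comment above refers to the `Tail: "1"` option in the updated test's ServiceLogs call, which is visible in the after_content earlier in this record. Below is a minimal sketch of that call pulled out into a standalone helper; the package name `example` and the helper name `lastServiceLogLine` are illustrative only and are not part of the patch.

package example

import (
	"context"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// lastServiceLogLine reads the service's log stream with Tail "1" (the single
// most recent line), as the updated tests do, instead of listing tasks and
// reading container logs directly.
func lastServiceLogLine(ctx context.Context, apiClient client.APIClient, serviceID string) (string, error) {
	body, err := apiClient.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{
		Tail:       "1",
		ShowStdout: true,
	})
	if err != nil {
		return "", err
	}
	defer body.Close()

	content, err := io.ReadAll(body)
	if err != nil {
		return "", err
	}
	return string(content), nil
}

The tests then simply assert that the returned line contains the expected mode string ("-rwxrwxrwx"), so a single tail line is all they need.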
thaJeztah
4,413
moby/moby
42,960
Fix TestCreateServiceSecretFileMode, TestCreateServiceConfigFileMode
hopefully fixes https://github.com/moby/moby/issues/37132 Looks like this test was broken from the start, and fully relied on a race condition. (Test was added in 65ee7fff02111bf696bc2fec442d07c2957f4151 / https://github.com/moby/moby/pull/36130) The problem is in the service's command: `ls -l /etc/config || /bin/top`, which will either: - exit immediately if the secret is mounted correctly at `/etc/config` (which it should) - keep running with `/bin/top` if the above failed After the service is created, the test enters a race-condition, checking for 1 task to be running (which it ocassionally is), after which it proceeds, and looks up the list of tasks of the service, to get the log output of `ls -l /etc/config`. This is another race: first of all, the original filter for that task lookup did not filter by `running`, so it would pick "any" task of the service (either failed, running, or "completed" (successfully exited) tasks). In the meantime though, SwarmKit kept reconciling the service, and creating new tasks, so even if the test was able to get the ID of the correct task, that task may already have been exited, and removed (task-limit is 5 by default), so only if the test was "lucky", it would be able to get the logs, but of course, chances were likely that it would be "too late", and the task already gone. The problem can be easily reproduced when running the steps manually: echo 'CONFIG' | docker config create myconfig - docker service create --config source=myconfig,target=/etc/config,mode=0777 --name myservice busybox sh -c 'ls -l /etc/config || /bin/top' The above creates the service, but it keeps retrying, because each task exits immediately (followed by SwarmKit reconciling and starting a new task); mjntpfkkyuuc1dpay4h00c4oo overall progress: 0 out of 1 tasks 1/1: ready [======================================> ] verify: Detected task failure ^COperation continuing in background. Use `docker service ps mjntpfkkyuuc1dpay4h00c4oo` to check progress. And checking the tasks for the service reveals that tasks exit cleanly (no error), but _do exit_, so swarm just keeps up reconciling, and spinning up new tasks; docker service ps myservice --no-trunc ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS 2wmcuv4vffnet8nybg3he4v9n myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Ready Ready less than a second ago 5p8b006uec125iq2892lxay64 \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete less than a second ago k8lpsvlak4b3nil0zfkexw61p \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 6 seconds ago vsunl5pi7e2n9ol3p89kvj6pn \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 11 seconds ago orxl8b6kt2l6dfznzzd4lij4s \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 17 seconds ago This patch changes the service's command to `sleep`, so that a successful task (after successfully performing `ls -l /etc/config`) continues to be running until the service is deleted. 
With that change, the service should (usually) reconcile immediately, which removes the race condition, and should also make it faster :) This patch changes the tests to use client.ServiceLogs() instead of using the service's tasklist to directly access container logs. This should also fix some failures that happened if some tasks failed to start before reconciling, in which case client.TaskList() (with the current filters), could return more tasks than anticipated (as it also contained the exited tasks); === RUN TestCreateServiceSecretFileMode create_test.go:291: assertion failed: 2 (int) != 1 (int) --- FAIL: TestCreateServiceSecretFileMode (7.88s) === RUN TestCreateServiceConfigFileMode create_test.go:355: assertion failed: 2 (int) != 1 (int) --- FAIL: TestCreateServiceConfigFileMode (7.87s)
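The core of the fix described above is the change to the service's command. As a minimal sketch, here is the before/after shape of that command, using the same shell strings that appear in the before_content and after_content fields of this record below; the variable names and the `example` package are illustrative only.

package example

// oldCmd is the original test command: it exits as soon as
// `ls -l /etc/secret` succeeds, so SwarmKit keeps replacing the completed
// task and the test races against reconciliation.
var oldCmd = []string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}

// newCmd is the patched command: after a successful check, `sleep inf` keeps
// the task running until the service is removed, so there is exactly one
// long-lived task whose logs contain the `ls` output.
var newCmd = []string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}

The config-file variant of the test makes the same change with /etc/config in place of /etc/secret; in both cases the command is wired into the service spec through swarm.ServiceWithCommand, as shown in the test file below.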
null
2021-10-22 13:38:08+00:00
2021-10-27 10:29:24+00:00
integration/service/create_test.go
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config || /bin/top"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := 
io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. 
(thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. 
// get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config && sleep inf"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. 
// // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. (thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. 
service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
thaJeztah
9aa0b24dcfa8956e03644ec43cf09dadcd5b6223
aef8e4817251278524ed34a41cb456f5c7cf519b
Not super familiar with this test code, but what image is it running? Some `sleep(1)` implementations (including the GNU coreutils version) accept `inf` as an argument to sleep indefinitely. That might help here rather than assuming the test will be done before 600s have elapsed.
samuelkarp
4,414
moby/moby
42,960
Fix TestCreateServiceSecretFileMode, TestCreateServiceConfigFileMode
hopefully fixes https://github.com/moby/moby/issues/37132 Looks like this test was broken from the start, and fully relied on a race condition. (Test was added in 65ee7fff02111bf696bc2fec442d07c2957f4151 / https://github.com/moby/moby/pull/36130) The problem is in the service's command: `ls -l /etc/config || /bin/top`, which will either: - exit immediately if the secret is mounted correctly at `/etc/config` (which it should) - keep running with `/bin/top` if the above failed After the service is created, the test enters a race-condition, checking for 1 task to be running (which it ocassionally is), after which it proceeds, and looks up the list of tasks of the service, to get the log output of `ls -l /etc/config`. This is another race: first of all, the original filter for that task lookup did not filter by `running`, so it would pick "any" task of the service (either failed, running, or "completed" (successfully exited) tasks). In the meantime though, SwarmKit kept reconciling the service, and creating new tasks, so even if the test was able to get the ID of the correct task, that task may already have been exited, and removed (task-limit is 5 by default), so only if the test was "lucky", it would be able to get the logs, but of course, chances were likely that it would be "too late", and the task already gone. The problem can be easily reproduced when running the steps manually: echo 'CONFIG' | docker config create myconfig - docker service create --config source=myconfig,target=/etc/config,mode=0777 --name myservice busybox sh -c 'ls -l /etc/config || /bin/top' The above creates the service, but it keeps retrying, because each task exits immediately (followed by SwarmKit reconciling and starting a new task); mjntpfkkyuuc1dpay4h00c4oo overall progress: 0 out of 1 tasks 1/1: ready [======================================> ] verify: Detected task failure ^COperation continuing in background. Use `docker service ps mjntpfkkyuuc1dpay4h00c4oo` to check progress. And checking the tasks for the service reveals that tasks exit cleanly (no error), but _do exit_, so swarm just keeps up reconciling, and spinning up new tasks; docker service ps myservice --no-trunc ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS 2wmcuv4vffnet8nybg3he4v9n myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Ready Ready less than a second ago 5p8b006uec125iq2892lxay64 \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete less than a second ago k8lpsvlak4b3nil0zfkexw61p \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 6 seconds ago vsunl5pi7e2n9ol3p89kvj6pn \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 11 seconds ago orxl8b6kt2l6dfznzzd4lij4s \_ myservice.1 busybox:latest@sha256:f7ca5a32c10d51aeda3b4d01c61c6061f497893d7f6628b92f822f7117182a57 docker-desktop Shutdown Complete 17 seconds ago This patch changes the service's command to `sleep`, so that a successful task (after successfully performing `ls -l /etc/config`) continues to be running until the service is deleted. 
With that change, the service should (usually) reconcile immediately, which removes the race condition, and should also make it faster :) This patch changes the tests to use client.ServiceLogs() instead of using the service's tasklist to directly access container logs. This should also fix some failures that happened if some tasks failed to start before reconciling, in which case client.TaskList() (with the current filters), could return more tasks than anticipated (as it also contained the exited tasks); === RUN TestCreateServiceSecretFileMode create_test.go:291: assertion failed: 2 (int) != 1 (int) --- FAIL: TestCreateServiceSecretFileMode (7.88s) === RUN TestCreateServiceConfigFileMode create_test.go:355: assertion failed: 2 (int) != 1 (int) --- FAIL: TestCreateServiceConfigFileMode (7.87s)
null
2021-10-22 13:38:08+00:00
2021-10-27 10:29:24+00:00
integration/service/create_test.go
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config || /bin/top"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := 
io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. 
(thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. 
// get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
package service // import "github.com/docker/docker/integration/service" import ( "context" "fmt" "io" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/strslice" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" "github.com/docker/docker/errdefs" "github.com/docker/docker/integration/internal/network" "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) func TestServiceCreateInit(t *testing.T) { defer setupTest(t)() t.Run("daemonInitDisabled", testServiceCreateInit(false)) t.Run("daemonInitEnabled", testServiceCreateInit(true)) } func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { return func(t *testing.T) { var ops = []daemon.Option{} if daemonEnabled { ops = append(ops, daemon.WithInit()) } d := swarm.NewSwarm(t, testEnv, ops...) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() booleanTrue := true booleanFalse := false serviceID := swarm.CreateService(t, d) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i := inspectServiceContainer(t, client, serviceID) // HostConfig.Init == nil means that it delegates to daemon configuration assert.Check(t, i.HostConfig.Init == nil) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(true, *i.HostConfig.Init)) serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll) i = inspectServiceContainer(t, client, serviceID) assert.Check(t, is.Equal(false, *i.HostConfig.Init)) } } func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { t.Helper() filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) assert.NilError(t, err) assert.Check(t, is.Len(containers, 1)) i, err := client.ContainerInspect(context.Background(), containers[0].ID) assert.NilError(t, err) return i } func TestCreateServiceMultipleTimes(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() overlayName := "overlay1_" + t.Name() overlayID := network.CreateNoError(ctx, t, client, overlayName, network.WithCheckDuplicate(), network.WithDriver("overlay"), ) var instances uint64 = 4 serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(overlayName), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) serviceID2 := swarm.CreateService(t, d, serviceSpec...) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) err = client.ServiceRemove(context.Background(), serviceID2) assert.NilError(t, err) // we can't just wait on no tasks for the service, counter-intuitively. // Tasks may briefly exist but not show up, if they are are in the process // of being deallocated. To avoid this case, we should retry network remove // a few times, to give tasks time to be deallcoated poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll) for retry := 0; retry < 5; retry++ { err = client.NetworkRemove(context.Background(), overlayID) // TODO(dperny): using strings.Contains for error checking is awful, // but so is the fact that swarm functions don't return errdefs errors. // I don't have time at this moment to fix the latter, so I guess I'll // go with the former. // // The full error we're looking for is something like this: // // Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v // // The safest way to catch this, I think, will be to match on "is in // use by", as this is an uninterrupted string that best identifies // this error. if err == nil || !strings.Contains(err.Error(), "is in use by") { // if there is no error, or the error isn't this kind of error, // then we'll break the loop body, and either fail the test or // continue. break } } assert.NilError(t, err) poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceConflict(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) c := d.NewClientT(t) defer c.Close() ctx := context.Background() serviceName := "TestService_" + t.Name() serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithName(serviceName), } swarm.CreateService(t, d, serviceSpec...) spec := swarm.CreateServiceSpec(t, serviceSpec...) _, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{}) assert.Check(t, errdefs.IsConflict(err)) assert.ErrorContains(t, err, "service "+serviceName+" already exists") } func TestCreateServiceMaxReplicas(t *testing.T) { defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() var maxReplicas uint64 = 2 serviceSpec := []swarm.ServiceSpecOpt{ swarm.ServiceWithReplicas(maxReplicas), swarm.ServiceWithMaxReplicas(maxReplicas), } serviceID := swarm.CreateService(t, d, serviceSpec...) 
poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll) _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) } func TestCreateWithDuplicateNetworkNames(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() name := "foo_" + t.Name() n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge")) // Duplicates with name but with different driver n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay")) // Create Service with the same name var instances uint64 = 1 serviceName := "top_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithNetwork(name), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service, and wait for its tasks to be removed err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) // Remove networks err = client.NetworkRemove(context.Background(), n3) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2) assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1) assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } func TestCreateServiceSecretFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() secretName := "TestSecret_" + t.Name() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ Name: secretName, }, Data: []byte("TESTSECRET"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances), swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret && sleep inf"}), swarm.ServiceWithSecret(&swarmtypes.SecretReference{ File: &swarmtypes.SecretReferenceFileTarget{ Name: "/etc/secret", UID: "0", GID: "0", Mode: 0777, }, SecretID: secretResp.ID, SecretName: secretName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll) err = client.SecretRemove(ctx, secretName) assert.NilError(t, err) } func TestCreateServiceConfigFileMode(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() configName := "TestConfig_" + t.Name() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ Name: configName, }, Data: []byte("TESTCONFIG"), }) assert.NilError(t, err) var instances uint64 = 1 serviceName := "TestService_" + t.Name() serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName), swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config && sleep inf"}), swarm.ServiceWithReplicas(instances), swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ File: &swarmtypes.ConfigReferenceFileTarget{ Name: "/etc/config", UID: "0", GID: "0", Mode: 0777, }, ConfigID: configResp.ID, ConfigName: configName, }), ) poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) body, err := client.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{ Tail: "1", ShowStdout: true, }) assert.NilError(t, err) defer body.Close() content, err := io.ReadAll(body) assert.NilError(t, err) assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceID) assert.NilError(t, err) poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID)) err = client.ConfigRemove(ctx, configName) assert.NilError(t, err) } // TestServiceCreateSysctls tests that a service created with sysctl options in // the ContainerSpec correctly applies those options. 
// // To test this, we're going to create a service with the sysctl option // // {"net.ipv4.ip_nonlocal_bind": "0"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // sysctl option with the correct value, we can assume that the sysctl has been // plumbed correctly. // // Next, we'll remove that service and create a new service with that option // set to 1. This means that no matter what the default is, we can be confident // that the sysctl option is applying as intended. // // Additionally, we'll do service and task inspects to verify that the inspect // output includes the desired sysctl option. // // We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly // confident won't be modified by the container runtime, and won't blow // anything up in the test environment func TestCreateServiceSysctls(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "setting service sysctls is unsupported before api v1.40", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // run thie block twice, so that no matter what the default value of // net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl // options works for _, expected := range []string{"0", "1"} { // store the map we're going to be using everywhere. expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected} // Create the service with the sysctl options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithSysctls(expectedSysctls), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the sysctl option set? // 2. Does the task have the sysctl in the spec? // 3. Does the service have the sysctl in the spec? // // if all 3 of these things are true, we know that the sysctl has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the sysctl set on the container inspect, // we know that the sysctl is plumbed correctly. everything below that // level has been tested elsewhere. (thanks @thaJeztah, because an // earlier version of this test had to get container logs and was much // more complex) // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the sysctl option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls) // verify that the task has the sysctl option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls) // verify that the service also has the sysctl set in the spec. 
service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls, ) } } // TestServiceCreateCapabilities tests that a service created with capabilities options in // the ContainerSpec correctly applies those options. // // To test this, we're going to create a service with the capabilities option // // []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"} // // We'll get the service's tasks to get the container ID, and then we'll // inspect the container. If the output of the container inspect contains the // capabilities option with the correct value, we can assume that the capabilities has been // plumbed correctly. func TestCreateServiceCapabilities(t *testing.T) { skip.If( t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"), "setting service capabilities is unsupported before api v1.41", ) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.Background() // store the map we're going to be using everywhere. capAdd := []string{"CAP_SYS_CHROOT"} capDrop := []string{"CAP_NET_RAW"} // Create the service with the capabilities options var instances uint64 = 1 serviceID := swarm.CreateService(t, d, swarm.ServiceWithCapabilities(capAdd, capDrop), ) // wait for the service to converge to 1 running task as expected poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances)) // we're going to check 3 things: // // 1. Does the container, when inspected, have the capabilities option set? // 2. Does the task have the capabilities in the spec? // 3. Does the service have the capabilities in the spec? // // if all 3 of these things are true, we know that the capabilities has been // plumbed correctly through the engine. // // We don't actually have to get inside the container and check its // logs or anything. If we see the capabilities set on the container inspect, // we know that the capabilities is plumbed correctly. everything below that // level has been tested elsewhere. // get all of the tasks of the service, so we can get the container filter := filters.NewArgs() filter.Add("service", serviceID) tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) assert.NilError(t, err) assert.Check(t, is.Equal(len(tasks), 1)) // verify that the container has the capabilities option set ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID) assert.NilError(t, err) assert.DeepEqual(t, ctnr.HostConfig.CapAdd, strslice.StrSlice(capAdd)) assert.DeepEqual(t, ctnr.HostConfig.CapDrop, strslice.StrSlice(capDrop)) // verify that the task has the capabilities option set in the task object assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.CapabilityDrop, capDrop) // verify that the service also has the capabilities set in the spec. service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) assert.NilError(t, err) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityAdd, capAdd) assert.DeepEqual(t, service.Spec.TaskTemplate.ContainerSpec.CapabilityDrop, capDrop) }
thaJeztah
9aa0b24dcfa8956e03644ec43cf09dadcd5b6223
aef8e4817251278524ed34a41cb456f5c7cf519b
> Not super familiar with this test code, but what image is it running? The default for these tests is to use `busybox`. The `Dockerfile` has a `frozen-images` stage, which downloads some images that are used in integration tests. Originally this was added to; - be sure we always use the exact same image - speed up the tests (no `docker pull` for each of the tests); this also helped with hiccups / outages of Docker Hub, if they would happen Nowadays it also comes in handy with rate limits 😅 (although, we should set up a "pull only" token in CI) > Some sleep(1) implementations (including the GNU coreutils version) accept inf as an argument to sleep indefinitely. That might help here rather than assuming the test will be done before 600s have elapsed. Oh, TIL about `inf` being a valid option. Looks like busybox `sleep` supports that value. I'll update the test; I suspect there will be various other tests that used `<some arbitrary, randomly picked long timeout>`; we can go through our tests and update those as well.
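To make the `sleep inf` suggestion concrete, here is a minimal sketch of the container-command change in this row (busybox and GNU coreutils `sleep` accept `inf`; other implementations may not, and the paths are only illustrative):

```sh
#!/bin/sh
# Before (as in the before_content above): keep the task alive with a
# long-running process and hope the test finishes first.
#   ls -l /etc/secret || /bin/top

# After (as in the after_content above): run the check, then sleep forever
# so the task never exits on its own.
ls -l /etc/secret && sleep inf
```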
thaJeztah
4,415
moby/moby
42,958
Fix racey TestHealthKillContainer
Before this change, if you assume that things work the way the test expects them to (they do not, but let's assume so for now), we aren't really testing anything, because we are testing that a container is healthy before and after we send a signal. This will give false positives even if there is a bug in the underlying code. Sending a signal can take any amount of time to cause a container to exit or to trigger healthchecks to stop. Now let's remove the assumption that things are working as expected, because they are not. In this case, `top` (which is what is running in the container) is actually exiting when it receives `USR1`. This totally invalidates the test. We need more control over, and knowledge of, what is happening in the container to properly test this. This change introduces a custom script which traps `USR1` and flips the health status each time the signal is received. We then send the signal twice and check that the value has flipped each time, so that we know the change has actually occurred. Fixes #41930
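A condensed sketch of the trap-and-flip idea described above; the full script is in the after_content field of this row, and the `/health` path and loop here are only illustrative:

```sh
#!/bin/sh
# Start out healthy; the container healthcheck reads this file, e.g.
#   test "$(cat /health)" = "1"
echo 1 > /health

# Flip the recorded state every time SIGUSR1 arrives, so the test can prove
# that healthchecks kept running after the signal was delivered.
flip() {
    if [ "$(cat /health)" = "1" ]; then echo 0 > /health; else echo 1 > /health; fi
}
trap flip USR1

# Keep the container alive.
while true; do sleep 1; done
```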
null
2021-10-21 19:35:18+00:00
2021-10-21 23:04:12+00:00
integration/container/health_test.go
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "gotest.tools/v3/assert" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) // TestHealthCheckWorkdir verifies that health-checks inherit the containers' // working-dir. func TestHealthCheckWorkdir(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "FIXME") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithTty(true), container.WithWorkingDir("/foo"), func(c *container.TestContainerConfig) { c.Config.Healthcheck = &containertypes.HealthConfig{ Test: []string{"CMD-SHELL", "if [ \"$PWD\" = \"/foo\" ]; then exit 0; else exit 1; fi;"}, Interval: 50 * time.Millisecond, Retries: 3, } }) poll.WaitOn(t, pollForHealthStatus(ctx, client, cID, types.Healthy), poll.WithDelay(100*time.Millisecond)) } // GitHub #37263 // Do not stop healthchecks just because we sent a signal to the container func TestHealthKillContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports SIGKILL and SIGTERM? See https://github.com/moby/moby/issues/39574") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { c.Config.Healthcheck = &containertypes.HealthConfig{ Test: []string{"CMD-SHELL", "sleep 1"}, Interval: time.Second, Retries: 5, } }) ctxPoll, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() poll.WaitOn(t, pollForHealthStatus(ctxPoll, client, id, "healthy"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGUSR1") assert.NilError(t, err) ctxPoll, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() poll.WaitOn(t, pollForHealthStatus(ctxPoll, client, id, "healthy"), poll.WithDelay(100*time.Millisecond)) } func pollForHealthStatus(ctx context.Context, client client.APIClient, containerID string, healthStatus string) func(log poll.LogT) poll.Result { return func(log poll.LogT) poll.Result { inspect, err := client.ContainerInspect(ctx, containerID) switch { case err != nil: return poll.Error(err) case inspect.State.Health.Status == healthStatus: return poll.Success() default: return poll.Continue("waiting for container to become %s", healthStatus) } } }
package container // import "github.com/docker/docker/integration/container" import ( "context" "testing" "time" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "gotest.tools/v3/assert" "gotest.tools/v3/poll" "gotest.tools/v3/skip" ) // TestHealthCheckWorkdir verifies that health-checks inherit the containers' // working-dir. func TestHealthCheckWorkdir(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "FIXME") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() cID := container.Run(ctx, t, client, container.WithTty(true), container.WithWorkingDir("/foo"), func(c *container.TestContainerConfig) { c.Config.Healthcheck = &containertypes.HealthConfig{ Test: []string{"CMD-SHELL", "if [ \"$PWD\" = \"/foo\" ]; then exit 0; else exit 1; fi;"}, Interval: 50 * time.Millisecond, Retries: 3, } }) poll.WaitOn(t, pollForHealthStatus(ctx, client, cID, types.Healthy), poll.WithDelay(100*time.Millisecond)) } // GitHub #37263 // Do not stop healthchecks just because we sent a signal to the container func TestHealthKillContainer(t *testing.T) { skip.If(t, testEnv.OSType == "windows", "Windows only supports SIGKILL and SIGTERM? See https://github.com/moby/moby/issues/39574") defer setupTest(t)() ctx := context.Background() client := testEnv.APIClient() id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) { cmd := ` # Set the initial HEALTH value so the healthcheck passes HEALTH="1" echo $HEALTH > /health # Any time doHealth is run we flip the value # This lets us use kill signals to determine when healtchecks have run. doHealth() { case "$HEALTH" in "0") HEALTH="1" ;; "1") HEALTH="0" ;; esac echo $HEALTH > /health } trap 'doHealth' USR1 while true; do sleep 1; done ` c.Config.Cmd = []string{"/bin/sh", "-c", cmd} c.Config.Healthcheck = &containertypes.HealthConfig{ Test: []string{"CMD-SHELL", `[ "$(cat /health)" = "1" ]`}, Interval: time.Second, Retries: 5, } }) ctxPoll, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() poll.WaitOn(t, pollForHealthStatus(ctxPoll, client, id, "healthy"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGUSR1") assert.NilError(t, err) ctxPoll, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() poll.WaitOn(t, pollForHealthStatus(ctxPoll, client, id, "unhealthy"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIGUSR1") assert.NilError(t, err) ctxPoll, cancel = context.WithTimeout(ctx, 30*time.Second) defer cancel() poll.WaitOn(t, pollForHealthStatus(ctxPoll, client, id, "healthy"), poll.WithDelay(100*time.Millisecond)) } func pollForHealthStatus(ctx context.Context, client client.APIClient, containerID string, healthStatus string) func(log poll.LogT) poll.Result { return func(log poll.LogT) poll.Result { inspect, err := client.ContainerInspect(ctx, containerID) switch { case err != nil: return poll.Error(err) case inspect.State.Health.Status == healthStatus: return poll.Success() default: return poll.Continue("waiting for container to become %s", healthStatus) } } }
cpuguy83
8f4d85801f46a3b3eb930892c71d3cf74f97d94f
bb6fdac7a25121aee9e08bd297825b42d3cdd358
Actually surprised the multiline `cmd` works. Makes sense that it does, just that I didn't directly think of doing it this way (without writing it to a file). Nice! 👍👍
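The point the reviewer is reacting to, that a whole multiline script can be handed to the shell as one argument instead of being written out to a file first, can be sketched in plain shell (the script body here is only illustrative):

```sh
#!/bin/sh
# A multiline script held in a single string...
script='
trap "echo got USR1" USR1
echo "running as pid $$"
kill -s USR1 $$
'

# ...and passed to `sh -c` as one argument; no temporary file needed.
sh -c "$script"
```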
thaJeztah
4,416
moby/moby
42,946
dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior
**- What I did** `XDG_RUNTIME_DIR` variable not setting, check should be fail. ``` [root@master ~]# echo $XDG_RUNTIME_DIR /run/user/0 [root@master ~]# su - foouser Last login: Sun Oct 17 17:52:14 CST 2021 on pts/0 [foouser@master ~]$ id foouser uid=1000(foouser) gid=1000(foouser) groups=1000(foouser) [foouser@master ~]$ echo $XDG_RUNTIME_DIR [foouser@master ~]$ systemctl --user show-environment Failed to get D-Bus connection: Connection refused ``` ``` $ cat test.sh echo "orign value=$XDG_RUNTIME_DIR" echo "1. with [ ] not quote ..." [ -w $XDG_RUNTIME_DIR ] echo "get 1 ret_code: $?" echo "2. with [ ] and quote ..." [ -w "$XDG_RUNTIME_DIR" ] echo "get 2 ret_code: $?" $ sh ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 [foouser@bk-master ~]$ bash ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 ``` **- How I did it** Quoted from `Advanced Bash-Scripting Guide` https://tldp.org/LDP/abs/html/gotchas.html > Sometimes variables within "test" brackets ([ ]) need to be quoted (double quotes). Failure to do so may cause unexpected behavior. See Example 7-6, Example 20-5, and Example 9-6. **- How to verify it** Updated `/bin/dockerd-rootless.sh` manually and run it: ``` [foouser@bk-master ~]$ echo $XDG_RUNTIME_DIR [foouser@bk-master ~]$ dockerd-rootless.sh --help + case "$1" in + '[' -w '' ']' + echo 'XDG_RUNTIME_DIR needs to be set and writable' XDG_RUNTIME_DIR needs to be set and writable + exit 1 ``` **- Description for the changelog** dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior Signed-off-by: Chenyang Yan <[email protected]>
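The behavior shown in the transcript above comes down to how `[` handles a vanishing argument: unquoted, the empty expansion disappears and the test degrades to a one-argument check that always succeeds. A minimal sketch (the variable name is only illustrative):

```sh
#!/bin/sh
unset MYDIR

# Unquoted: after word splitting this becomes `[ -w ]`, a one-argument test
# that only asks whether the literal string "-w" is non-empty, so it succeeds.
if [ -w $MYDIR ]; then echo "unquoted check passed (not what we want)"; fi

# Quoted: this stays `[ -w "" ]`, a real writability test on an empty path,
# which fails as intended.
if [ -w "$MYDIR" ]; then echo "quoted check passed"; else echo "quoted check failed as expected"; fi
```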
null
2021-10-17 10:27:47+00:00
2021-10-18 07:41:18+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d $HOME ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w "$XDG_RUNTIME_DIR" ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d "$HOME" ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z "$rootlesskit" ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z "$net" ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z "$mtu" ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z "$net" ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z "$mtu" ]; then mtu=1500 fi if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
uddmorningsun
311ec0d77fc3c19b75cf2290da45cf261144482f
921658af951df877c219e8af841354e21857f6eb
I'm confused about the original purpose of this line. If `_DOCKERD_ROOTLESS_CHILD=errorValue dockerd-rootless.sh --help` is run, it will exit directly because of `set -e`; is that the expected behavior? So I added `true` as a protection mechanism. If it is the expected behavior, I'll rebase and update it after reviewing.
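What the comment is asking about can be reproduced in a few lines: under `set -e` a bare test that evaluates to false aborts the script, which may be the intended assertion here, while the `|| true` the commenter mentions would let execution continue. A sketch, with an illustrative variable name:

```sh
#!/bin/sh
set -e

_CHILD="errorValue"

# Bare test under `set -e`: if this were uncommented and false, the script
# would exit immediately, acting as an assertion.
#   [ "$_CHILD" = 1 ]

# With `|| true`, a failing test no longer terminates the script.
[ "$_CHILD" = 1 ] || true

echo "still running"
```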
uddmorningsun
4,417
moby/moby
42,946
dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior
**- What I did** `XDG_RUNTIME_DIR` variable not setting, check should be fail. ``` [root@master ~]# echo $XDG_RUNTIME_DIR /run/user/0 [root@master ~]# su - foouser Last login: Sun Oct 17 17:52:14 CST 2021 on pts/0 [foouser@master ~]$ id foouser uid=1000(foouser) gid=1000(foouser) groups=1000(foouser) [foouser@master ~]$ echo $XDG_RUNTIME_DIR [foouser@master ~]$ systemctl --user show-environment Failed to get D-Bus connection: Connection refused ``` ``` $ cat test.sh echo "orign value=$XDG_RUNTIME_DIR" echo "1. with [ ] not quote ..." [ -w $XDG_RUNTIME_DIR ] echo "get 1 ret_code: $?" echo "2. with [ ] and quote ..." [ -w "$XDG_RUNTIME_DIR" ] echo "get 2 ret_code: $?" $ sh ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 [foouser@bk-master ~]$ bash ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 ``` **- How I did it** Quoted from `Advanced Bash-Scripting Guide` https://tldp.org/LDP/abs/html/gotchas.html > Sometimes variables within "test" brackets ([ ]) need to be quoted (double quotes). Failure to do so may cause unexpected behavior. See Example 7-6, Example 20-5, and Example 9-6. **- How to verify it** Updated `/bin/dockerd-rootless.sh` manually and run it: ``` [foouser@bk-master ~]$ echo $XDG_RUNTIME_DIR [foouser@bk-master ~]$ dockerd-rootless.sh --help + case "$1" in + '[' -w '' ']' + echo 'XDG_RUNTIME_DIR needs to be set and writable' XDG_RUNTIME_DIR needs to be set and writable + exit 1 ``` **- Description for the changelog** dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior Signed-off-by: Chenyang Yan <[email protected]>
null
2021-10-17 10:27:47+00:00
2021-10-18 07:41:18+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d $HOME ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w "$XDG_RUNTIME_DIR" ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d "$HOME" ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z "$rootlesskit" ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z "$net" ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z "$mtu" ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z "$net" ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z "$mtu" ]; then mtu=1500 fi if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
uddmorningsun
311ec0d77fc3c19b75cf2290da45cf261144482f
921658af951df877c219e8af841354e21857f6eb
This is an assertion. If the variable is set but its value is not “1”, it should fail.
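A minimal sketch of why the bare test works as an assertion (hypothetical demo, not part of the PR; the variable value is assumed for illustration): under `set -e`, a test command that exits non-zero terminates the script, so if `_DOCKERD_ROOTLESS_CHILD` holds anything other than `1` the script stops right there.

```sh
#!/bin/sh
# Hypothetical demo, not from the repository.
set -e

_DOCKERD_ROOTLESS_CHILD=2           # assumed value, chosen to trigger the failure

[ "$_DOCKERD_ROOTLESS_CHILD" = 1 ]  # test exits 1, and set -e aborts the script here

echo "never reached"
```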
AkihiroSuda
4,418
moby/moby
42,946
dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior
**- What I did** `XDG_RUNTIME_DIR` variable not setting, check should be fail. ``` [root@master ~]# echo $XDG_RUNTIME_DIR /run/user/0 [root@master ~]# su - foouser Last login: Sun Oct 17 17:52:14 CST 2021 on pts/0 [foouser@master ~]$ id foouser uid=1000(foouser) gid=1000(foouser) groups=1000(foouser) [foouser@master ~]$ echo $XDG_RUNTIME_DIR [foouser@master ~]$ systemctl --user show-environment Failed to get D-Bus connection: Connection refused ``` ``` $ cat test.sh echo "orign value=$XDG_RUNTIME_DIR" echo "1. with [ ] not quote ..." [ -w $XDG_RUNTIME_DIR ] echo "get 1 ret_code: $?" echo "2. with [ ] and quote ..." [ -w "$XDG_RUNTIME_DIR" ] echo "get 2 ret_code: $?" $ sh ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 [foouser@bk-master ~]$ bash ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 ``` **- How I did it** Quoted from `Advanced Bash-Scripting Guide` https://tldp.org/LDP/abs/html/gotchas.html > Sometimes variables within "test" brackets ([ ]) need to be quoted (double quotes). Failure to do so may cause unexpected behavior. See Example 7-6, Example 20-5, and Example 9-6. **- How to verify it** Updated `/bin/dockerd-rootless.sh` manually and run it: ``` [foouser@bk-master ~]$ echo $XDG_RUNTIME_DIR [foouser@bk-master ~]$ dockerd-rootless.sh --help + case "$1" in + '[' -w '' ']' + echo 'XDG_RUNTIME_DIR needs to be set and writable' XDG_RUNTIME_DIR needs to be set and writable + exit 1 ``` **- Description for the changelog** dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior Signed-off-by: Chenyang Yan <[email protected]>
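A small sketch of the gotcha the description demonstrates (hypothetical snippet, not taken from the repository): when the variable is unset, the unquoted form collapses to a one-argument test, which only checks that its argument is a non-empty string and therefore succeeds.

```sh
#!/bin/sh
# Hypothetical reproduction of the quoting gotcha described above.
unset XDG_RUNTIME_DIR

# Unquoted: expands to `[ -w ]`. A single-argument test merely checks that
# the argument ("-w") is a non-empty string, so it succeeds (exit 0).
if [ -w $XDG_RUNTIME_DIR ]; then echo "unquoted check passes"; fi

# Quoted: expands to `[ -w "" ]`, a real writability check against the empty
# string, which fails (exit 1) as intended.
if [ -w "$XDG_RUNTIME_DIR" ]; then echo "quoted check passes"; else echo "quoted check fails"; fi
```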
null
2021-10-17 10:27:47+00:00
2021-10-18 07:41:18+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d $HOME ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w "$XDG_RUNTIME_DIR" ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d "$HOME" ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z "$rootlesskit" ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z "$net" ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z "$mtu" ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z "$net" ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z "$mtu" ]; then mtu=1500 fi if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
uddmorningsun
311ec0d77fc3c19b75cf2290da45cf261144482f
921658af951df877c219e8af841354e21857f6eb
I see. Maybe add an error message to give a hint? Otherwise the user won't know what happened. e.g.: ``` [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] || { echo "variable '_DOCKERD_ROOTLESS_CHILD' must be 1" exit 1 } ```
uddmorningsun
4,419
moby/moby
42,946
dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior
**- What I did** `XDG_RUNTIME_DIR` variable not setting, check should be fail. ``` [root@master ~]# echo $XDG_RUNTIME_DIR /run/user/0 [root@master ~]# su - foouser Last login: Sun Oct 17 17:52:14 CST 2021 on pts/0 [foouser@master ~]$ id foouser uid=1000(foouser) gid=1000(foouser) groups=1000(foouser) [foouser@master ~]$ echo $XDG_RUNTIME_DIR [foouser@master ~]$ systemctl --user show-environment Failed to get D-Bus connection: Connection refused ``` ``` $ cat test.sh echo "orign value=$XDG_RUNTIME_DIR" echo "1. with [ ] not quote ..." [ -w $XDG_RUNTIME_DIR ] echo "get 1 ret_code: $?" echo "2. with [ ] and quote ..." [ -w "$XDG_RUNTIME_DIR" ] echo "get 2 ret_code: $?" $ sh ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 [foouser@bk-master ~]$ bash ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 ``` **- How I did it** Quoted from `Advanced Bash-Scripting Guide` https://tldp.org/LDP/abs/html/gotchas.html > Sometimes variables within "test" brackets ([ ]) need to be quoted (double quotes). Failure to do so may cause unexpected behavior. See Example 7-6, Example 20-5, and Example 9-6. **- How to verify it** Updated `/bin/dockerd-rootless.sh` manually and run it: ``` [foouser@bk-master ~]$ echo $XDG_RUNTIME_DIR [foouser@bk-master ~]$ dockerd-rootless.sh --help + case "$1" in + '[' -w '' ']' + echo 'XDG_RUNTIME_DIR needs to be set and writable' XDG_RUNTIME_DIR needs to be set and writable + exit 1 ``` **- Description for the changelog** dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior Signed-off-by: Chenyang Yan <[email protected]>
null
2021-10-17 10:27:47+00:00
2021-10-18 07:41:18+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d $HOME ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w "$XDG_RUNTIME_DIR" ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d "$HOME" ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z "$rootlesskit" ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z "$net" ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z "$mtu" ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z "$net" ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z "$mtu" ]; then mtu=1500 fi if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
uddmorningsun
311ec0d77fc3c19b75cf2290da45cf261144482f
921658af951df877c219e8af841354e21857f6eb
The error is theoretical and won’t happen in the actual code, so I think we can just retain the previous code.
AkihiroSuda
4,420
moby/moby
42,946
dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior
**- What I did** `XDG_RUNTIME_DIR` variable not setting, check should be fail. ``` [root@master ~]# echo $XDG_RUNTIME_DIR /run/user/0 [root@master ~]# su - foouser Last login: Sun Oct 17 17:52:14 CST 2021 on pts/0 [foouser@master ~]$ id foouser uid=1000(foouser) gid=1000(foouser) groups=1000(foouser) [foouser@master ~]$ echo $XDG_RUNTIME_DIR [foouser@master ~]$ systemctl --user show-environment Failed to get D-Bus connection: Connection refused ``` ``` $ cat test.sh echo "orign value=$XDG_RUNTIME_DIR" echo "1. with [ ] not quote ..." [ -w $XDG_RUNTIME_DIR ] echo "get 1 ret_code: $?" echo "2. with [ ] and quote ..." [ -w "$XDG_RUNTIME_DIR" ] echo "get 2 ret_code: $?" $ sh ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 [foouser@bk-master ~]$ bash ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 ``` **- How I did it** Quoted from `Advanced Bash-Scripting Guide` https://tldp.org/LDP/abs/html/gotchas.html > Sometimes variables within "test" brackets ([ ]) need to be quoted (double quotes). Failure to do so may cause unexpected behavior. See Example 7-6, Example 20-5, and Example 9-6. **- How to verify it** Updated `/bin/dockerd-rootless.sh` manually and run it: ``` [foouser@bk-master ~]$ echo $XDG_RUNTIME_DIR [foouser@bk-master ~]$ dockerd-rootless.sh --help + case "$1" in + '[' -w '' ']' + echo 'XDG_RUNTIME_DIR needs to be set and writable' XDG_RUNTIME_DIR needs to be set and writable + exit 1 ``` **- Description for the changelog** dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior Signed-off-by: Chenyang Yan <[email protected]>
null
2021-10-17 10:27:47+00:00
2021-10-18 07:41:18+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d $HOME ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w "$XDG_RUNTIME_DIR" ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d "$HOME" ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z "$rootlesskit" ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z "$net" ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z "$mtu" ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z "$net" ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z "$mtu" ]; then mtu=1500 fi if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
uddmorningsun
311ec0d77fc3c19b75cf2290da45cf261144482f
921658af951df877c219e8af841354e21857f6eb
OK
uddmorningsun
4,421
moby/moby
42,946
dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior
**- What I did** `XDG_RUNTIME_DIR` variable not setting, check should be fail. ``` [root@master ~]# echo $XDG_RUNTIME_DIR /run/user/0 [root@master ~]# su - foouser Last login: Sun Oct 17 17:52:14 CST 2021 on pts/0 [foouser@master ~]$ id foouser uid=1000(foouser) gid=1000(foouser) groups=1000(foouser) [foouser@master ~]$ echo $XDG_RUNTIME_DIR [foouser@master ~]$ systemctl --user show-environment Failed to get D-Bus connection: Connection refused ``` ``` $ cat test.sh echo "orign value=$XDG_RUNTIME_DIR" echo "1. with [ ] not quote ..." [ -w $XDG_RUNTIME_DIR ] echo "get 1 ret_code: $?" echo "2. with [ ] and quote ..." [ -w "$XDG_RUNTIME_DIR" ] echo "get 2 ret_code: $?" $ sh ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 [foouser@bk-master ~]$ bash ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 ``` **- How I did it** Quoted from `Advanced Bash-Scripting Guide` https://tldp.org/LDP/abs/html/gotchas.html > Sometimes variables within "test" brackets ([ ]) need to be quoted (double quotes). Failure to do so may cause unexpected behavior. See Example 7-6, Example 20-5, and Example 9-6. **- How to verify it** Updated `/bin/dockerd-rootless.sh` manually and run it: ``` [foouser@bk-master ~]$ echo $XDG_RUNTIME_DIR [foouser@bk-master ~]$ dockerd-rootless.sh --help + case "$1" in + '[' -w '' ']' + echo 'XDG_RUNTIME_DIR needs to be set and writable' XDG_RUNTIME_DIR needs to be set and writable + exit 1 ``` **- Description for the changelog** dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior Signed-off-by: Chenyang Yan <[email protected]>
null
2021-10-17 10:27:47+00:00
2021-10-18 07:41:18+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d $HOME ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w "$XDG_RUNTIME_DIR" ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d "$HOME" ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z "$rootlesskit" ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z "$net" ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z "$mtu" ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z "$net" ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z "$mtu" ]; then mtu=1500 fi if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
uddmorningsun
311ec0d77fc3c19b75cf2290da45cf261144482f
921658af951df877c219e8af841354e21857f6eb
I have rebased and updated for this.
uddmorningsun
4,422
moby/moby
42,946
dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior
**- What I did** `XDG_RUNTIME_DIR` variable not setting, check should be fail. ``` [root@master ~]# echo $XDG_RUNTIME_DIR /run/user/0 [root@master ~]# su - foouser Last login: Sun Oct 17 17:52:14 CST 2021 on pts/0 [foouser@master ~]$ id foouser uid=1000(foouser) gid=1000(foouser) groups=1000(foouser) [foouser@master ~]$ echo $XDG_RUNTIME_DIR [foouser@master ~]$ systemctl --user show-environment Failed to get D-Bus connection: Connection refused ``` ``` $ cat test.sh echo "orign value=$XDG_RUNTIME_DIR" echo "1. with [ ] not quote ..." [ -w $XDG_RUNTIME_DIR ] echo "get 1 ret_code: $?" echo "2. with [ ] and quote ..." [ -w "$XDG_RUNTIME_DIR" ] echo "get 2 ret_code: $?" $ sh ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 [foouser@bk-master ~]$ bash ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 ``` **- How I did it** Quoted from `Advanced Bash-Scripting Guide` https://tldp.org/LDP/abs/html/gotchas.html > Sometimes variables within "test" brackets ([ ]) need to be quoted (double quotes). Failure to do so may cause unexpected behavior. See Example 7-6, Example 20-5, and Example 9-6. **- How to verify it** Updated `/bin/dockerd-rootless.sh` manually and run it: ``` [foouser@bk-master ~]$ echo $XDG_RUNTIME_DIR [foouser@bk-master ~]$ dockerd-rootless.sh --help + case "$1" in + '[' -w '' ']' + echo 'XDG_RUNTIME_DIR needs to be set and writable' XDG_RUNTIME_DIR needs to be set and writable + exit 1 ``` **- Description for the changelog** dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior Signed-off-by: Chenyang Yan <[email protected]>
null
2021-10-17 10:27:47+00:00
2021-10-18 07:41:18+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d $HOME ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w "$XDG_RUNTIME_DIR" ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d "$HOME" ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z "$rootlesskit" ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z "$net" ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z "$mtu" ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z "$net" ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z "$mtu" ]; then mtu=1500 fi if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
uddmorningsun
311ec0d77fc3c19b75cf2290da45cf261144482f
921658af951df877c219e8af841354e21857f6eb
@AkihiroSuda should this have been an `if` condition as well? (looking at why this is a test without any conditional branch associated with it)
thaJeztah
4,423
moby/moby
42,946
dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior
**- What I did** `XDG_RUNTIME_DIR` variable not setting, check should be fail. ``` [root@master ~]# echo $XDG_RUNTIME_DIR /run/user/0 [root@master ~]# su - foouser Last login: Sun Oct 17 17:52:14 CST 2021 on pts/0 [foouser@master ~]$ id foouser uid=1000(foouser) gid=1000(foouser) groups=1000(foouser) [foouser@master ~]$ echo $XDG_RUNTIME_DIR [foouser@master ~]$ systemctl --user show-environment Failed to get D-Bus connection: Connection refused ``` ``` $ cat test.sh echo "orign value=$XDG_RUNTIME_DIR" echo "1. with [ ] not quote ..." [ -w $XDG_RUNTIME_DIR ] echo "get 1 ret_code: $?" echo "2. with [ ] and quote ..." [ -w "$XDG_RUNTIME_DIR" ] echo "get 2 ret_code: $?" $ sh ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 [foouser@bk-master ~]$ bash ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 ``` **- How I did it** Quoted from `Advanced Bash-Scripting Guide` https://tldp.org/LDP/abs/html/gotchas.html > Sometimes variables within "test" brackets ([ ]) need to be quoted (double quotes). Failure to do so may cause unexpected behavior. See Example 7-6, Example 20-5, and Example 9-6. **- How to verify it** Updated `/bin/dockerd-rootless.sh` manually and run it: ``` [foouser@bk-master ~]$ echo $XDG_RUNTIME_DIR [foouser@bk-master ~]$ dockerd-rootless.sh --help + case "$1" in + '[' -w '' ']' + echo 'XDG_RUNTIME_DIR needs to be set and writable' XDG_RUNTIME_DIR needs to be set and writable + exit 1 ``` **- Description for the changelog** dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior Signed-off-by: Chenyang Yan <[email protected]>
null
2021-10-17 10:27:47+00:00
2021-10-18 07:41:18+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d $HOME ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w "$XDG_RUNTIME_DIR" ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d "$HOME" ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z "$rootlesskit" ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z "$net" ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z "$mtu" ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z "$net" ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z "$mtu" ]; then mtu=1500 fi if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
uddmorningsun
311ec0d77fc3c19b75cf2290da45cf261144482f
921658af951df877c219e8af841354e21857f6eb
Because it is just an equivalent of `assert (x == 1);` in C https://github.com/moby/moby/pull/42946#discussion_r730426314
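For illustration only (not part of the PR), a minimal sh sketch of that behaviour: with `set -e` in effect, a bare `[ ... ]` test acts as an assertion, aborting the script when the condition is false, so no `if` branch is needed.

```sh
#!/bin/sh
# Minimal sketch: a bare test under `set -e` behaves like C's assert().
set -e

_DOCKERD_ROOTLESS_CHILD=1   # hypothetical value, for illustration only

# Succeeds silently when true; if it were false, `set -e` would make the
# whole script exit here with a non-zero status -- no `if` branch needed.
[ "$_DOCKERD_ROOTLESS_CHILD" = 1 ]

echo "assertion passed, continuing as the rootless child"
```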
AkihiroSuda
4,424
moby/moby
42,946
dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior
**- What I did** `XDG_RUNTIME_DIR` variable not setting, check should be fail. ``` [root@master ~]# echo $XDG_RUNTIME_DIR /run/user/0 [root@master ~]# su - foouser Last login: Sun Oct 17 17:52:14 CST 2021 on pts/0 [foouser@master ~]$ id foouser uid=1000(foouser) gid=1000(foouser) groups=1000(foouser) [foouser@master ~]$ echo $XDG_RUNTIME_DIR [foouser@master ~]$ systemctl --user show-environment Failed to get D-Bus connection: Connection refused ``` ``` $ cat test.sh echo "orign value=$XDG_RUNTIME_DIR" echo "1. with [ ] not quote ..." [ -w $XDG_RUNTIME_DIR ] echo "get 1 ret_code: $?" echo "2. with [ ] and quote ..." [ -w "$XDG_RUNTIME_DIR" ] echo "get 2 ret_code: $?" $ sh ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 [foouser@bk-master ~]$ bash ./test.sh orign value= 1. with [ ] not quote ... get 1 ret_code: 0 2. with [ ] and quote ... get 2 ret_code: 1 ``` **- How I did it** Quoted from `Advanced Bash-Scripting Guide` https://tldp.org/LDP/abs/html/gotchas.html > Sometimes variables within "test" brackets ([ ]) need to be quoted (double quotes). Failure to do so may cause unexpected behavior. See Example 7-6, Example 20-5, and Example 9-6. **- How to verify it** Updated `/bin/dockerd-rootless.sh` manually and run it: ``` [foouser@bk-master ~]$ echo $XDG_RUNTIME_DIR [foouser@bk-master ~]$ dockerd-rootless.sh --help + case "$1" in + '[' -w '' ']' + echo 'XDG_RUNTIME_DIR needs to be set and writable' XDG_RUNTIME_DIR needs to be set and writable + exit 1 ``` **- Description for the changelog** dockerd-rootless.sh: Fix variable not double quotes cause unexpected behavior Signed-off-by: Chenyang Yan <[email protected]>
null
2021-10-17 10:27:47+00:00
2021-10-18 07:41:18+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d $HOME ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/go/rootless/ set -e -x case "$1" in "check" | "install" | "uninstall") echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?" exit 1 ;; esac if ! [ -w "$XDG_RUNTIME_DIR" ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! [ -d "$HOME" ]; then echo "HOME needs to be set and exist." exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if command -v $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z "$rootlesskit" ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z "$net" ]; then if command -v slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z "$mtu" ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z "$net" ]; then if command -v vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z "$mtu" ]; then mtu=1500 fi if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent. # https://github.com/rootless-containers/rootlesskit/issues/94 if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then _DOCKERD_ROOTLESS_SELINUX=1 export _DOCKERD_ROOTLESS_SELINUX fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ "$_DOCKERD_ROOTLESS_CHILD" = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then # iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected. # https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401 # https://github.com/moby/moby/issues/41230 chcon system_u:object_r:iptables_var_run_t:s0 /run fi if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then # Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed. # https://github.com/rootless-containers/rootlesskit/issues/225 realpath_etc_ssl=$(realpath /etc/ssl) rm -f /etc/ssl mkdir /etc/ssl mount --rbind ${realpath_etc_ssl} /etc/ssl fi exec dockerd $@ fi
uddmorningsun
311ec0d77fc3c19b75cf2290da45cf261144482f
921658af951df877c219e8af841354e21857f6eb
🤦 I missed the discussion above; it was probably shown as "resolved".
thaJeztah
4,425
moby/moby
42,942
update containerd binary to v1.6.1
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** **- How I did it** **- How to verify it** **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-10-15 17:34:40+00:00
2022-03-12 19:59:38+00:00
Dockerfile.windows
# escape=` # ----------------------------------------------------------------------------------------- # This file describes the standard way to build Docker in a container on Windows # Server 2016 or Windows 10. # # Maintainer: @jhowardmsft # ----------------------------------------------------------------------------------------- # Prerequisites: # -------------- # # 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major # build number must be at least 14393. This can be confirmed, for example, by # running the following from an elevated PowerShell prompt - this sample output # is from a fully up to date machine as at mid-November 2016: # # >> PS C:\> $(gin).WindowsBuildLabEx # >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 # # 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. # # 3. The machine must be configured to run containers. For example, by following # the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or # https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md # # 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server # containers as the default option, it is recommended you have at least 1GB # of memory assigned; For Windows 10 where Hyper-V Containers are employed, you # should have at least 4GB of memory assigned. Note also, to run Hyper-V # containers in a VM, it is necessary to configure the VM for nested virtualization. # ----------------------------------------------------------------------------------------- # Usage: # ----- # # The following steps should be run from an (elevated*) Windows PowerShell prompt. # # (*In a default installation of containers on Windows following the quick-start guidance at # https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, # the docker.exe client must run elevated to be able to connect to the daemon). # # 1. Clone the sources from github.com: # # >> git clone https://github.com/docker/docker.git C:\gopath\src\github.com\docker\docker # >> Cloning into 'C:\gopath\src\github.com\docker\docker'... # >> remote: Counting objects: 186216, done. # >> remote: Compressing objects: 100% (21/21), done. # >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 # >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. # >> Resolving deltas: 100% (123139/123139), done. # >> Checking connectivity... done. # >> Checking out files: 100% (3912/3912), done. # >> PS C:\> # # # 2. Change directory to the cloned docker sources: # # >> cd C:\gopath\src\github.com\docker\docker # # # 3. Build a docker image with the components required to build the docker binaries from source # by running one of the following: # # >> docker build -t nativebuildimage -f Dockerfile.windows . # >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers) # # # 4. Build the docker executable binaries by running one of the following: # # >> $DOCKER_GITCOMMIT=(git rev-parse --short HEAD) # >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT nativebuildimage hack\make.ps1 -Binary # >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers) # # # 5. 
Copy the binaries out of the container, replacing HostPath with an appropriate destination # folder on the host system where you want the binaries to be located. # # >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe # >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe # # # 6. (Optional) Remove the interim container holding the built executable binaries: # # >> docker rm binaries # # # 7. (Optional) Remove the image used for the container in which the executable # binaries are build. Tip - it may be useful to keep this image around if you need to # build multiple times. Then you can take advantage of the builder cache to have an # image which has all the components required to build the binaries already installed. # # >> docker rmi nativebuildimage # # ----------------------------------------------------------------------------------------- # The validation tests can only run directly on the host. This is because they calculate # information from the git repo, but the .git directory is not passed into the image as # it is excluded via .dockerignore. Run the following from a Windows PowerShell prompt # (elevation is not required): (Note Go must be installed to run these tests) # # >> hack\make.ps1 -DCO -PkgImports -GoFormat # ----------------------------------------------------------------------------------------- # To run unit tests, ensure you have created the nativebuildimage above. Then run one of # the following from an (elevated) Windows PowerShell prompt: # # >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit # >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers) # ----------------------------------------------------------------------------------------- # To run unit tests and binary build, ensure you have created the nativebuildimage above. Then # run one of the following from an (elevated) Windows PowerShell prompt: # # >> docker run nativebuildimage hack\make.ps1 -All # >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers) # ----------------------------------------------------------------------------------------- # Important notes: # --------------- # # Don't attempt to use a bind mount to pass a local directory as the bundles target # directory. It does not work (golang attempts for follow a mapped folder incorrectly). # Instead, use docker cp as per the example. # # go.zip is not removed from the image as it is used by the Windows CI servers # to ensure the host and image are running consistent versions of go. # # Nanoserver support is a work in progress. Although the image will build if the # FROM statement is updated, it will not work when running autogen through hack\make.ps1. # It is suspected that the required GCC utilities (eg gcc, windres, windmc) silently # quit due to the use of console hooks which are not available. # # The docker integration tests do not currently run in a container on Windows, predominantly # due to Windows not supporting privileged mode, so anything using a volume would fail. # They (along with the rest of the docker CI suite) can be run using # https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1. # # ----------------------------------------------------------------------------------------- # The number of build steps below are explicitly minimised to improve performance. 
# Extremely important - do not change the following line to reference a "specific" image, # such as `mcr.microsoft.com/windows/servercore:ltsc2019`. If using this Dockerfile in process # isolated containers, the kernel of the host must match the container image, and hence # would fail between Windows Server 2016 (aka RS1) and Windows Server 2019 (aka RS5). # It is expected that the image `microsoft/windowsservercore:latest` is present, and matches # the hosts kernel version before doing a build. FROM microsoft/windowsservercore # Use PowerShell as the default shell SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"] ARG GO_VERSION=1.17.8 ARG CONTAINERD_VERSION=1.5.10 ARG GOTESTSUM_VERSION=v1.7.0 # Environment variable notes: # - GO_VERSION must be consistent with 'Dockerfile' used by Linux. # - CONTAINERD_VERSION must be consistent with 'hack/dockerfile/install/containerd.installer' used by Linux. # - FROM_DOCKERFILE is used for detection of building within a container. ENV GO_VERSION=${GO_VERSION} ` CONTAINERD_VERSION=${CONTAINERD_VERSION} ` GIT_VERSION=2.11.1 ` GOPATH=C:\gopath ` GO111MODULE=off ` FROM_DOCKERFILE=1 ` GOTESTSUM_VERSION=${GOTESTSUM_VERSION} RUN ` Function Test-Nano() { ` $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; ` return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); ` }` ` Function Download-File([string] $source, [string] $target) { ` if (Test-Nano) { ` $handler = New-Object System.Net.Http.HttpClientHandler; ` $client = New-Object System.Net.Http.HttpClient($handler); ` $client.Timeout = New-Object System.TimeSpan(0, 30, 0); ` $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); ` $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); ` $responseMsg.Wait(); ` if (!$responseMsg.IsCanceled) { ` $response = $responseMsg.Result; ` if ($response.IsSuccessStatusCode) { ` $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); ` $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); ` $copyStreamOp.Wait(); ` $downloadedFileStream.Close(); ` if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } ` } ` } else { ` Throw ("Failed to download " + $source) ` }` } else { ` [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; ` $webClient = New-Object System.Net.WebClient; ` $webClient.DownloadFile($source, $target); ` } ` } ` ` setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin;C:\containerd\bin'); ` ` Write-Host INFO: Downloading git...; ` $location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; ` Download-File $location C:\gitsetup.zip; ` ` Write-Host INFO: Downloading go...; ` $dlGoVersion=$Env:GO_VERSION -replace '\.0$',''; ` Download-File "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; ` ` Write-Host INFO: Downloading compiler 1 of 3...; ` Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; ` ` Write-Host INFO: Downloading compiler 2 of 3...; ` Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` ` Write-Host INFO: Downloading compiler 3 of 3...; ` Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/binutils.zip 
C:\binutils.zip; ` ` Write-Host INFO: Extracting git...; ` Expand-Archive C:\gitsetup.zip C:\git-tmp; ` New-Item -Type Directory C:\git | Out-Null; ` Move-Item C:\git-tmp\tools\* C:\git\.; ` Remove-Item -Recurse -Force C:\git-tmp; ` ` Write-Host INFO: Expanding go...; ` Expand-Archive C:\go.zip -DestinationPath C:\; ` ` Write-Host INFO: Expanding compiler 1 of 3...; ` Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` Write-Host INFO: Expanding compiler 2 of 3...; ` Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` Write-Host INFO: Expanding compiler 3 of 3...; ` Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` ` Write-Host INFO: Removing downloaded files...; ` Remove-Item C:\gcc.zip; ` Remove-Item C:\runtime.zip; ` Remove-Item C:\binutils.zip; ` Remove-Item C:\gitsetup.zip; ` ` Write-Host INFO: Downloading containerd; ` Install-Package -Force 7Zip4PowerShell; ` $location='https://github.com/containerd/containerd/releases/download/v'+$Env:CONTAINERD_VERSION+'/containerd-'+$Env:CONTAINERD_VERSION+'-windows-amd64.tar.gz'; ` Download-File $location C:\containerd.tar.gz; ` New-Item -Path C:\containerd -ItemType Directory; ` Expand-7Zip C:\containerd.tar.gz C:\; ` Expand-7Zip C:\containerd.tar C:\containerd; ` Remove-Item C:\containerd.tar.gz; ` Remove-Item C:\containerd.tar; ` ` # Ensure all directories exist that we will require below.... $srcDir = """$Env:GOPATH`\src\github.com\docker\docker\bundles"""; ` Write-Host INFO: Ensuring existence of directory $srcDir...; ` New-Item -Force -ItemType Directory -Path $srcDir | Out-Null; ` ` Write-Host INFO: Configuring git core.autocrlf...; ` C:\git\cmd\git config --global core.autocrlf true; RUN ` Function Install-GoTestSum() { ` $Env:GO111MODULE = 'on'; ` $tmpGobin = "${Env:GOBIN_TMP}"; ` $Env:GOBIN = """${Env:GOPATH}`\bin"""; ` Write-Host "INFO: Installing gotestsum version $Env:GOTESTSUM_VERSION in $Env:GOBIN"; ` &go install "gotest.tools/gotestsum@${Env:GOTESTSUM_VERSION}"; ` $Env:GOBIN = "${tmpGobin}"; ` $Env:GO111MODULE = 'off'; ` if ($LASTEXITCODE -ne 0) { ` Throw '"gotestsum install failed..."'; ` } ` } ` ` Install-GoTestSum # Make PowerShell the default entrypoint ENTRYPOINT ["powershell.exe"] # Set the working directory to the location of the sources WORKDIR ${GOPATH}\src\github.com\docker\docker # Copy the sources into the container COPY . .
# escape=` # ----------------------------------------------------------------------------------------- # This file describes the standard way to build Docker in a container on Windows # Server 2016 or Windows 10. # # Maintainer: @jhowardmsft # ----------------------------------------------------------------------------------------- # Prerequisites: # -------------- # # 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major # build number must be at least 14393. This can be confirmed, for example, by # running the following from an elevated PowerShell prompt - this sample output # is from a fully up to date machine as at mid-November 2016: # # >> PS C:\> $(gin).WindowsBuildLabEx # >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 # # 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. # # 3. The machine must be configured to run containers. For example, by following # the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or # https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md # # 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server # containers as the default option, it is recommended you have at least 1GB # of memory assigned; For Windows 10 where Hyper-V Containers are employed, you # should have at least 4GB of memory assigned. Note also, to run Hyper-V # containers in a VM, it is necessary to configure the VM for nested virtualization. # ----------------------------------------------------------------------------------------- # Usage: # ----- # # The following steps should be run from an (elevated*) Windows PowerShell prompt. # # (*In a default installation of containers on Windows following the quick-start guidance at # https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, # the docker.exe client must run elevated to be able to connect to the daemon). # # 1. Clone the sources from github.com: # # >> git clone https://github.com/docker/docker.git C:\gopath\src\github.com\docker\docker # >> Cloning into 'C:\gopath\src\github.com\docker\docker'... # >> remote: Counting objects: 186216, done. # >> remote: Compressing objects: 100% (21/21), done. # >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 # >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. # >> Resolving deltas: 100% (123139/123139), done. # >> Checking connectivity... done. # >> Checking out files: 100% (3912/3912), done. # >> PS C:\> # # # 2. Change directory to the cloned docker sources: # # >> cd C:\gopath\src\github.com\docker\docker # # # 3. Build a docker image with the components required to build the docker binaries from source # by running one of the following: # # >> docker build -t nativebuildimage -f Dockerfile.windows . # >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers) # # # 4. Build the docker executable binaries by running one of the following: # # >> $DOCKER_GITCOMMIT=(git rev-parse --short HEAD) # >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT nativebuildimage hack\make.ps1 -Binary # >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers) # # # 5. 
Copy the binaries out of the container, replacing HostPath with an appropriate destination # folder on the host system where you want the binaries to be located. # # >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe # >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe # # # 6. (Optional) Remove the interim container holding the built executable binaries: # # >> docker rm binaries # # # 7. (Optional) Remove the image used for the container in which the executable # binaries are build. Tip - it may be useful to keep this image around if you need to # build multiple times. Then you can take advantage of the builder cache to have an # image which has all the components required to build the binaries already installed. # # >> docker rmi nativebuildimage # # ----------------------------------------------------------------------------------------- # The validation tests can only run directly on the host. This is because they calculate # information from the git repo, but the .git directory is not passed into the image as # it is excluded via .dockerignore. Run the following from a Windows PowerShell prompt # (elevation is not required): (Note Go must be installed to run these tests) # # >> hack\make.ps1 -DCO -PkgImports -GoFormat # ----------------------------------------------------------------------------------------- # To run unit tests, ensure you have created the nativebuildimage above. Then run one of # the following from an (elevated) Windows PowerShell prompt: # # >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit # >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers) # ----------------------------------------------------------------------------------------- # To run unit tests and binary build, ensure you have created the nativebuildimage above. Then # run one of the following from an (elevated) Windows PowerShell prompt: # # >> docker run nativebuildimage hack\make.ps1 -All # >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers) # ----------------------------------------------------------------------------------------- # Important notes: # --------------- # # Don't attempt to use a bind mount to pass a local directory as the bundles target # directory. It does not work (golang attempts for follow a mapped folder incorrectly). # Instead, use docker cp as per the example. # # go.zip is not removed from the image as it is used by the Windows CI servers # to ensure the host and image are running consistent versions of go. # # Nanoserver support is a work in progress. Although the image will build if the # FROM statement is updated, it will not work when running autogen through hack\make.ps1. # It is suspected that the required GCC utilities (eg gcc, windres, windmc) silently # quit due to the use of console hooks which are not available. # # The docker integration tests do not currently run in a container on Windows, predominantly # due to Windows not supporting privileged mode, so anything using a volume would fail. # They (along with the rest of the docker CI suite) can be run using # https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1. # # ----------------------------------------------------------------------------------------- # The number of build steps below are explicitly minimised to improve performance. 
# Extremely important - do not change the following line to reference a "specific" image, # such as `mcr.microsoft.com/windows/servercore:ltsc2019`. If using this Dockerfile in process # isolated containers, the kernel of the host must match the container image, and hence # would fail between Windows Server 2016 (aka RS1) and Windows Server 2019 (aka RS5). # It is expected that the image `microsoft/windowsservercore:latest` is present, and matches # the hosts kernel version before doing a build. FROM microsoft/windowsservercore # Use PowerShell as the default shell SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"] ARG GO_VERSION=1.17.8 ARG CONTAINERD_VERSION=1.6.1 ARG GOTESTSUM_VERSION=v1.7.0 # Environment variable notes: # - GO_VERSION must be consistent with 'Dockerfile' used by Linux. # - CONTAINERD_VERSION must be consistent with 'hack/dockerfile/install/containerd.installer' used by Linux. # - FROM_DOCKERFILE is used for detection of building within a container. ENV GO_VERSION=${GO_VERSION} ` CONTAINERD_VERSION=${CONTAINERD_VERSION} ` GIT_VERSION=2.11.1 ` GOPATH=C:\gopath ` GO111MODULE=off ` FROM_DOCKERFILE=1 ` GOTESTSUM_VERSION=${GOTESTSUM_VERSION} RUN ` Function Test-Nano() { ` $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; ` return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); ` }` ` Function Download-File([string] $source, [string] $target) { ` if (Test-Nano) { ` $handler = New-Object System.Net.Http.HttpClientHandler; ` $client = New-Object System.Net.Http.HttpClient($handler); ` $client.Timeout = New-Object System.TimeSpan(0, 30, 0); ` $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); ` $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); ` $responseMsg.Wait(); ` if (!$responseMsg.IsCanceled) { ` $response = $responseMsg.Result; ` if ($response.IsSuccessStatusCode) { ` $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); ` $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); ` $copyStreamOp.Wait(); ` $downloadedFileStream.Close(); ` if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } ` } ` } else { ` Throw ("Failed to download " + $source) ` }` } else { ` [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; ` $webClient = New-Object System.Net.WebClient; ` $webClient.DownloadFile($source, $target); ` } ` } ` ` setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin;C:\containerd\bin'); ` ` Write-Host INFO: Downloading git...; ` $location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; ` Download-File $location C:\gitsetup.zip; ` ` Write-Host INFO: Downloading go...; ` $dlGoVersion=$Env:GO_VERSION -replace '\.0$',''; ` Download-File "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; ` ` Write-Host INFO: Downloading compiler 1 of 3...; ` Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; ` ` Write-Host INFO: Downloading compiler 2 of 3...; ` Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` ` Write-Host INFO: Downloading compiler 3 of 3...; ` Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/binutils.zip 
C:\binutils.zip; ` ` Write-Host INFO: Extracting git...; ` Expand-Archive C:\gitsetup.zip C:\git-tmp; ` New-Item -Type Directory C:\git | Out-Null; ` Move-Item C:\git-tmp\tools\* C:\git\.; ` Remove-Item -Recurse -Force C:\git-tmp; ` ` Write-Host INFO: Expanding go...; ` Expand-Archive C:\go.zip -DestinationPath C:\; ` ` Write-Host INFO: Expanding compiler 1 of 3...; ` Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` Write-Host INFO: Expanding compiler 2 of 3...; ` Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` Write-Host INFO: Expanding compiler 3 of 3...; ` Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` ` Write-Host INFO: Removing downloaded files...; ` Remove-Item C:\gcc.zip; ` Remove-Item C:\runtime.zip; ` Remove-Item C:\binutils.zip; ` Remove-Item C:\gitsetup.zip; ` ` Write-Host INFO: Downloading containerd; ` Install-Package -Force 7Zip4PowerShell; ` $location='https://github.com/containerd/containerd/releases/download/v'+$Env:CONTAINERD_VERSION+'/containerd-'+$Env:CONTAINERD_VERSION+'-windows-amd64.tar.gz'; ` Download-File $location C:\containerd.tar.gz; ` New-Item -Path C:\containerd -ItemType Directory; ` Expand-7Zip C:\containerd.tar.gz C:\; ` Expand-7Zip C:\containerd.tar C:\containerd; ` Remove-Item C:\containerd.tar.gz; ` Remove-Item C:\containerd.tar; ` ` # Ensure all directories exist that we will require below.... $srcDir = """$Env:GOPATH`\src\github.com\docker\docker\bundles"""; ` Write-Host INFO: Ensuring existence of directory $srcDir...; ` New-Item -Force -ItemType Directory -Path $srcDir | Out-Null; ` ` Write-Host INFO: Configuring git core.autocrlf...; ` C:\git\cmd\git config --global core.autocrlf true; RUN ` Function Install-GoTestSum() { ` $Env:GO111MODULE = 'on'; ` $tmpGobin = "${Env:GOBIN_TMP}"; ` $Env:GOBIN = """${Env:GOPATH}`\bin"""; ` Write-Host "INFO: Installing gotestsum version $Env:GOTESTSUM_VERSION in $Env:GOBIN"; ` &go install "gotest.tools/gotestsum@${Env:GOTESTSUM_VERSION}"; ` $Env:GOBIN = "${tmpGobin}"; ` $Env:GO111MODULE = 'off'; ` if ($LASTEXITCODE -ne 0) { ` Throw '"gotestsum install failed..."'; ` } ` } ` ` Install-GoTestSum # Make PowerShell the default entrypoint ENTRYPOINT ["powershell.exe"] # Set the working directory to the location of the sources WORKDIR ${GOPATH}\src\github.com\docker\docker # Copy the sources into the container COPY . .
thaJeztah
b143ca1c0b08459a6f41b538129a09d07ac76adf
82f20733593669b44ebac80de5b3f9917a384826
Note to self: add temporary fix for version string / download URL in Windows Dockerfile
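The "temporary fix" mentioned here appears to be the `$dlGoVersion=$Env:GO_VERSION -replace '\.0$',''` step in the Dockerfile content above, which strips a trailing `.0` before building the download URL (Go has historically published x.y.0 releases as plain `go x.y` in its download filenames). A minimal shell sketch of the same normalization, offered as a hedged illustration rather than as part of the record:

```sh
# Illustration only: mirror the Dockerfile's PowerShell `-replace '\.0$',''`.
GO_VERSION=1.17.0
dlGoVersion="${GO_VERSION%.0}"   # "1.17.0" -> "1.17"; "1.17.8" is left unchanged
echo "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip"
# -> https://golang.org/dl/go1.17.windows-amd64.zip
```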
thaJeztah
4,426
moby/moby
42,935
Dockerfile: CRIU: disable GPG validation, due to expired signing key
This is a horrible thing to do, but CRIU installed here is only used as part of our CI / integration tests. We should of course remove this hack ASAP once the opensuse packagers have set up a new key, but at least this allows us to unblock CI, which is currently completely broken: ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu Hit:1 http://cdn-fastly.deb.debian.org/debian bullseye InRelease Hit:2 http://cdn-fastly.deb.debian.org/debian-security bullseye-security InRelease Hit:3 http://cdn-fastly.deb.debian.org/debian bullseye-updates InRelease Get:4 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10 InRelease [1540 B] Err:4 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10 InRelease The following signatures were invalid: EXPKEYSIG 30A8343A498D5A23 devel:tools OBS Project <devel:[email protected]> Reading package lists... W: GPG error: https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10 InRelease: The following signatures were invalid: EXPKEYSIG 30A8343A498D5A23 devel:tools OBS Project <devel:[email protected]> E: The repository 'https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10 InRelease' is not signed. And, checking the signing key (with `apt-key list`): /etc/apt/trusted.gpg.d/criu.gpg.asc ----------------------------------- pub rsa2048 2015-05-03 [SC] [expired: 2021-10-13] 428E 4E34 8405 CE79 00DB 99C2 30A8 343A 498D 5A23 uid [ expired] devel:tools OBS Project <devel:[email protected]> **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
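A hedged aside, not part of the PR description: the expiry shown by `apt-key list` above can also be confirmed directly from the downloaded Release.key with gpg (assuming GnuPG 2.1.23 or newer for `--show-keys`):

```sh
# Fetch the repository signing key and inspect it without importing it.
curl -fsSL -o criu-release.key \
    'https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key'
gpg --show-keys criu-release.key   # the "pub" line should report [expired: 2021-10-13]
```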
null
2021-10-14 13:49:09+00:00
2021-10-14 18:08:25+00:00
Dockerfile
# syntax=docker/dockerfile:1.3 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.17.1 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="bullseye" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # REGISTRY_VERSION specifies the version of the registry to build and install # from the https://github.com/docker/distribution repository. This version of # the registry is used to test both schema 1 and schema 2 manifests. Generally, # the version specified here should match a current release. ARG REGISTRY_VERSION=v2.3.0 # REGISTRY_VERSION_SCHEMA1 specifies the version of the regsitry to build and # install from the https://github.com/docker/distribution repository. This is # an older (pre v2.3.0) version of the registry that only supports schema1 # manifests. This version of the registry is not working on arm64, so installation # is skipped on that architecture. ARG REGISTRY_VERSION_SCHEMA1=v2.1.0 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_VERSION" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_VERSION_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and # install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen # # Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix, # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} AS dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems, so other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf \ libseccomp-dev:ppc64el \ libseccomp-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll # GOTOML_VERSION specifies the version of the tomll binary to build and install # from the https://github.com/pelletier/go-toml repository. This binary is used # in CI in the hack/validate/toml script. # # When updating this version, consider updating the github.com/pelletier/go-toml # dependency in vendor.conf accordingly. 
ARG GOTOML_VERSION=v1.8.1 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/pelletier/go-toml/cmd/tomll@${GOTOML_VERSION}" \ && /build/tomll --help FROM base AS vndr # VNDR_VERSION specifies the version of the vndr tool to build and install # from the https://github.com/LK4D4/vndr repository. # # The vndr tool is used to manage vendored go packages in the vendor directory, # and is pinned to a fixed version because different versions of this tool # can result in differences in the (go) files that are considered for vendoring. ARG VNDR_VERSION=v0.1.2 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/LK4D4/vndr@${VNDR_VERSION}" \ && /build/vndr --help FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_VERSION COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/containerd.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_VERSION=v1.23.8 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \ && /build/golangci-lint --version FROM base AS gotestsum ARG GOTESTSUM_VERSION=v1.7.0 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \ && /build/gotestsum --version FROM base AS shfmt ARG SHFMT_VERSION=v3.0.2 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \ && /build/shfmt --version FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/dockercli.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh dockercli FROM runtime-dev AS runc ARG RUNC_VERSION ARG RUNC_BUILDTAGS COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/runc.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_VERSION RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/tini.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_VERSION ARG PREFIX=/build COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/rootlesskit.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ /install.sh 
rootlesskit \ && "${PREFIX}"/rootlesskit --version \ && "${PREFIX}"/rootlesskit-docker-proxy --help COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ bash-completion \ bzip2 \ inetutils-ping \ iproute2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip \ zstd # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && 
apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.3 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.17.1 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="bullseye" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ # FIXME(sebastiaan) temporariliy disable GPG validation for these packages, as the release key has expired (https://github.com/moby/moby/pull/42931#issuecomment-943080120) echo 'deb [ allow-insecure=yes trusted=yes ] https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # REGISTRY_VERSION specifies the version of the registry to build and install # from the https://github.com/docker/distribution repository. This version of # the registry is used to test both schema 1 and schema 2 manifests. Generally, # the version specified here should match a current release. ARG REGISTRY_VERSION=v2.3.0 # REGISTRY_VERSION_SCHEMA1 specifies the version of the regsitry to build and # install from the https://github.com/docker/distribution repository. This is # an older (pre v2.3.0) version of the registry that only supports schema1 # manifests. This version of the registry is not working on arm64, so installation # is skipped on that architecture. ARG REGISTRY_VERSION_SCHEMA1=v2.1.0 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_VERSION" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_VERSION_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and # install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen # # Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix, # TODO: move to under moby/ or fix upstream go-swagger to work for us. 
ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . \ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} AS dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems, so other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf \ libseccomp-dev:ppc64el \ libseccomp-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll # GOTOML_VERSION specifies the version of the tomll binary to build and install # from the https://github.com/pelletier/go-toml repository. 
This binary is used # in CI in the hack/validate/toml script. # # When updating this version, consider updating the github.com/pelletier/go-toml # dependency in vendor.conf accordingly. ARG GOTOML_VERSION=v1.8.1 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/pelletier/go-toml/cmd/tomll@${GOTOML_VERSION}" \ && /build/tomll --help FROM base AS vndr # VNDR_VERSION specifies the version of the vndr tool to build and install # from the https://github.com/LK4D4/vndr repository. # # The vndr tool is used to manage vendored go packages in the vendor directory, # and is pinned to a fixed version because different versions of this tool # can result in differences in the (go) files that are considered for vendoring. ARG VNDR_VERSION=v0.1.2 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/LK4D4/vndr@${VNDR_VERSION}" \ && /build/vndr --help FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_VERSION COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/containerd.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_VERSION=v1.23.8 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \ && /build/golangci-lint --version FROM base AS gotestsum ARG GOTESTSUM_VERSION=v1.7.0 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \ && /build/gotestsum --version FROM base AS shfmt ARG SHFMT_VERSION=v3.0.2 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \ && /build/shfmt --version FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/dockercli.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh dockercli FROM runtime-dev AS runc ARG RUNC_VERSION ARG RUNC_BUILDTAGS COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/runc.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_VERSION RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/tini.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_VERSION ARG PREFIX=/build COPY 
/hack/dockerfile/install/install.sh /hack/dockerfile/install/rootlesskit.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ /install.sh rootlesskit \ && "${PREFIX}"/rootlesskit --version \ && "${PREFIX}"/rootlesskit-docker-proxy --help COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ bash-completion \ bzip2 \ inetutils-ping \ iproute2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip \ zstd # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS 
dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
thaJeztah
1430d849a4fe74d601896d4bbb0134e898ef8a76
4e6dbb3f5ca73fd6b29b20ff03d0da2d95b378fb
Alternative proposal:
```suggestion
echo 'deb [ allow-insecure=yes trusted=yes ] https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \
```
(So our "insecurity" is limited to just this `https` repository instead of any dependencies too)
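For readers skimming the record: the bracketed tokens in that suggestion are standard per-source options documented in sources.list(5). The sketch below restates the suggested line with a note on each option; the explanatory comments are my reading, not text from the review thread:

```sh
# One-line-style apt source: deb [ option=value ... ] uri suite [components]
#   allow-insecure=yes  accept this source even though its InRelease signature fails
#   trusted=yes         treat packages from this source as authenticated
# Both options apply only to this repository, unlike a global --allow-unauthenticated.
echo 'deb [ allow-insecure=yes trusted=yes ] https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' \
    > /etc/apt/sources.list.d/criu.list
```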
tianon
4,427
moby/moby
42,935
Dockerfile: CRIU: disable GPG validation, due to expired signing key
This is a horrible thing to do, but CRIU installed here is only used as part of our CI / integration tests. We should of course remove this hack ASAP once the opensuse packagers have set up a new key, but at least this allows us to unblock CI, which is currently completely broken: ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu Hit:1 http://cdn-fastly.deb.debian.org/debian bullseye InRelease Hit:2 http://cdn-fastly.deb.debian.org/debian-security bullseye-security InRelease Hit:3 http://cdn-fastly.deb.debian.org/debian bullseye-updates InRelease Get:4 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10 InRelease [1540 B] Err:4 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10 InRelease The following signatures were invalid: EXPKEYSIG 30A8343A498D5A23 devel:tools OBS Project <devel:[email protected]> Reading package lists... W: GPG error: https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10 InRelease: The following signatures were invalid: EXPKEYSIG 30A8343A498D5A23 devel:tools OBS Project <devel:[email protected]> E: The repository 'https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10 InRelease' is not signed. And, checking the signing key (with `apt-key list`): /etc/apt/trusted.gpg.d/criu.gpg.asc ----------------------------------- pub rsa2048 2015-05-03 [SC] [expired: 2021-10-13] 428E 4E34 8405 CE79 00DB 99C2 30A8 343A 498D 5A23 uid [ expired] devel:tools OBS Project <devel:[email protected]> **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-10-14 13:49:09+00:00
2021-10-14 18:08:25+00:00
Dockerfile
# syntax=docker/dockerfile:1.3 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.17.1 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="bullseye" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # REGISTRY_VERSION specifies the version of the registry to build and install # from the https://github.com/docker/distribution repository. This version of # the registry is used to test both schema 1 and schema 2 manifests. Generally, # the version specified here should match a current release. ARG REGISTRY_VERSION=v2.3.0 # REGISTRY_VERSION_SCHEMA1 specifies the version of the regsitry to build and # install from the https://github.com/docker/distribution repository. This is # an older (pre v2.3.0) version of the registry that only supports schema1 # manifests. This version of the registry is not working on arm64, so installation # is skipped on that architecture. ARG REGISTRY_VERSION_SCHEMA1=v2.1.0 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_VERSION" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_VERSION_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and # install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen # # Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix, # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} AS dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems, so other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf \ libseccomp-dev:ppc64el \ libseccomp-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll # GOTOML_VERSION specifies the version of the tomll binary to build and install # from the https://github.com/pelletier/go-toml repository. This binary is used # in CI in the hack/validate/toml script. # # When updating this version, consider updating the github.com/pelletier/go-toml # dependency in vendor.conf accordingly. 
ARG GOTOML_VERSION=v1.8.1 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/pelletier/go-toml/cmd/tomll@${GOTOML_VERSION}" \ && /build/tomll --help FROM base AS vndr # VNDR_VERSION specifies the version of the vndr tool to build and install # from the https://github.com/LK4D4/vndr repository. # # The vndr tool is used to manage vendored go packages in the vendor directory, # and is pinned to a fixed version because different versions of this tool # can result in differences in the (go) files that are considered for vendoring. ARG VNDR_VERSION=v0.1.2 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/LK4D4/vndr@${VNDR_VERSION}" \ && /build/vndr --help FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_VERSION COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/containerd.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_VERSION=v1.23.8 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \ && /build/golangci-lint --version FROM base AS gotestsum ARG GOTESTSUM_VERSION=v1.7.0 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \ && /build/gotestsum --version FROM base AS shfmt ARG SHFMT_VERSION=v3.0.2 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \ && /build/shfmt --version FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/dockercli.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh dockercli FROM runtime-dev AS runc ARG RUNC_VERSION ARG RUNC_BUILDTAGS COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/runc.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_VERSION RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/tini.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_VERSION ARG PREFIX=/build COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/rootlesskit.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ /install.sh 
rootlesskit \ && "${PREFIX}"/rootlesskit --version \ && "${PREFIX}"/rootlesskit-docker-proxy --help COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ bash-completion \ bzip2 \ inetutils-ping \ iproute2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip \ zstd # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && 
apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.3 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.17.1 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="bullseye" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ # FIXME(sebastiaan) temporariliy disable GPG validation for these packages, as the release key has expired (https://github.com/moby/moby/pull/42931#issuecomment-943080120) echo 'deb [ allow-insecure=yes trusted=yes ] https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # REGISTRY_VERSION specifies the version of the registry to build and install # from the https://github.com/docker/distribution repository. This version of # the registry is used to test both schema 1 and schema 2 manifests. Generally, # the version specified here should match a current release. ARG REGISTRY_VERSION=v2.3.0 # REGISTRY_VERSION_SCHEMA1 specifies the version of the regsitry to build and # install from the https://github.com/docker/distribution repository. This is # an older (pre v2.3.0) version of the registry that only supports schema1 # manifests. This version of the registry is not working on arm64, so installation # is skipped on that architecture. ARG REGISTRY_VERSION_SCHEMA1=v2.1.0 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_VERSION" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_VERSION_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and # install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen # # Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix, # TODO: move to under moby/ or fix upstream go-swagger to work for us. 
ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . \ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} AS dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems, so other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf \ libseccomp-dev:ppc64el \ libseccomp-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll # GOTOML_VERSION specifies the version of the tomll binary to build and install # from the https://github.com/pelletier/go-toml repository. 
This binary is used # in CI in the hack/validate/toml script. # # When updating this version, consider updating the github.com/pelletier/go-toml # dependency in vendor.conf accordingly. ARG GOTOML_VERSION=v1.8.1 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/pelletier/go-toml/cmd/tomll@${GOTOML_VERSION}" \ && /build/tomll --help FROM base AS vndr # VNDR_VERSION specifies the version of the vndr tool to build and install # from the https://github.com/LK4D4/vndr repository. # # The vndr tool is used to manage vendored go packages in the vendor directory, # and is pinned to a fixed version because different versions of this tool # can result in differences in the (go) files that are considered for vendoring. ARG VNDR_VERSION=v0.1.2 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/LK4D4/vndr@${VNDR_VERSION}" \ && /build/vndr --help FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_VERSION COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/containerd.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_VERSION=v1.23.8 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \ && /build/golangci-lint --version FROM base AS gotestsum ARG GOTESTSUM_VERSION=v1.7.0 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \ && /build/gotestsum --version FROM base AS shfmt ARG SHFMT_VERSION=v3.0.2 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \ && /build/shfmt --version FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/dockercli.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh dockercli FROM runtime-dev AS runc ARG RUNC_VERSION ARG RUNC_BUILDTAGS COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/runc.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_VERSION RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/tini.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ PREFIX=/build /install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_VERSION ARG PREFIX=/build COPY 
/hack/dockerfile/install/install.sh /hack/dockerfile/install/rootlesskit.installer / RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ /install.sh rootlesskit \ && "${PREFIX}"/rootlesskit --version \ && "${PREFIX}"/rootlesskit-docker-proxy --help COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ bash-completion \ bzip2 \ inetutils-ping \ iproute2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip \ zstd # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS 
dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
thaJeztah
1430d849a4fe74d601896d4bbb0134e898ef8a76
4e6dbb3f5ca73fd6b29b20ff03d0da2d95b378fb
oh, nice, yes that looks better! Let me try that
thaJeztah
4,428
moby/moby
42,934
Fix file capabilities dropping in Dockerfile
fixes #42655 <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** moved copyXattr function out of doCopyXattrs function, so that security capabilities are copied **- How I did it** doCopyXattrs() never reached due to copyXattrs boolean being false, as a result file capabilities not being copied. **- How to verify it** ***Test Case*** ```dockerfile FROM registry1-docker-io.repo.lab.pl.alcatel-lucent.com/library/alpine:latest RUN apk --no-cache add libcap && setcap cap_net_admin=eip /sbin/apk RUN setcap -v cap_net_admin=eip /sbin/apk ``` ***Test Result*** ``` Sending build context to Docker daemon 2.048kB Step 1/3 : FROM registry1-docker-io.repo.lab.pl.alcatel-lucent.com/library/alpine:latest latest: Pulling from library/alpine a0d0a0d46f8b: Pull complete Digest: sha256:e1c082e3d3c45cccac829840a25941e679c25d438cc8412c2fa221cf1a824e6a Status: Downloaded newer image for registry1-docker-io.repo.lab.pl.alcatel-lucent.com/library/alpine:latest ---> 14119a10abf4 Step 2/3 : RUN apk --no-cache add libcap && setcap cap_net_admin=eip /sbin/apk ---> Running in 923b46395907 fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/main/x86_64/APKINDEX.tar.gz fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/community/x86_64/APKINDEX.tar.gz (1/1) Installing libcap (2.50-r0) Executing busybox-1.33.1-r3.trigger OK: 6 MiB in 15 packages Removing intermediate container 923b46395907 ---> 97d6ec51d4b3 Step 3/3 : RUN setcap -v cap_net_admin=eip /sbin/apk ---> Running in 33260d42c77d /sbin/apk: OK Removing intermediate container 33260d42c77d ---> 5fc3c8660150 Successfully built 5fc3c8660150 Successfully tagged test:cap ``` **- Description for the changelog** Fixed issue of file capabilities dropping when moving to next command in Dockerfile during image building. <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->
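The core of this fix is copying the `security.capability` extended attribute unconditionally during a layer copy, instead of only when the opaque-whiteout xattrs are requested. The snippet below is a minimal, self-contained sketch of that idea, not the exact moby code (the real change lives in `DirCopy`/`copyXattr` in `daemon/graphdriver/copy/copy.go`, shown in the before/after content further down); the helper name `copyCapabilityXattr` and the command-line usage are illustrative assumptions.

```go
// Sketch: always propagate the "security.capability" xattr from src to dst,
// so file capabilities set in one Dockerfile RUN step survive into the next.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// copyCapabilityXattr copies security.capability from src to dst if present.
// A missing xattr or an unsupported filesystem is not an error.
func copyCapabilityXattr(src, dst string) error {
	// security.capability is small (<= 24 bytes), so a fixed buffer suffices.
	buf := make([]byte, 128)
	n, err := unix.Lgetxattr(src, "security.capability", buf)
	if err == unix.ENODATA || err == unix.ENOTSUP {
		return nil // nothing to copy
	}
	if err != nil {
		return err
	}
	return unix.Lsetxattr(dst, "security.capability", buf[:n], 0)
}

func main() {
	if len(os.Args) != 3 {
		fmt.Fprintln(os.Stderr, "usage: copycap <src> <dst>")
		os.Exit(2)
	}
	if err := copyCapabilityXattr(os.Args[1], os.Args[2]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

The key design point mirrored from the PR: the capability copy must not be gated behind the same flag that controls overlay-specific xattrs, otherwise `setcap` results silently disappear between build steps.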
null
2021-10-13 15:41:55+00:00
2022-07-27 21:31:39+00:00
daemon/graphdriver/copy/copy.go
//go:build linux // +build linux package copy // import "github.com/docker/docker/daemon/graphdriver/copy" import ( "container/list" "fmt" "io" "os" "path/filepath" "syscall" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "golang.org/x/sys/unix" ) // Mode indicates whether to use hardlink or copy content type Mode int const ( // Content creates a new file, and copies the content of the file Content Mode = iota // Hardlink creates a new hardlink to the existing file Hardlink ) func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { srcFile, err := os.Open(srcPath) if err != nil { return err } defer srcFile.Close() // If the destination file already exists, we shouldn't blow it away dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode()) if err != nil { return err } defer dstFile.Close() if *copyWithFileClone { err = unix.IoctlFileClone(int(dstFile.Fd()), int(srcFile.Fd())) if err == nil { return nil } *copyWithFileClone = false if err == unix.EXDEV { *copyWithFileRange = false } } if *copyWithFileRange { err = doCopyWithFileRange(srcFile, dstFile, fileinfo) // Trying the file_clone may not have caught the exdev case // as the ioctl may not have been available (therefore EINVAL) if err == unix.EXDEV || err == unix.ENOSYS { *copyWithFileRange = false } else { return err } } return legacyCopy(srcFile, dstFile) } func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error { amountLeftToCopy := fileinfo.Size() for amountLeftToCopy > 0 { n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0) if err != nil { return err } amountLeftToCopy = amountLeftToCopy - int64(n) } return nil } func legacyCopy(srcFile io.Reader, dstFile io.Writer) error { _, err := pools.Copy(dstFile, srcFile) return err } func copyXattr(srcPath, dstPath, attr string) error { data, err := system.Lgetxattr(srcPath, attr) if err != nil { return err } if data != nil { if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { return err } } return nil } type fileID struct { dev uint64 ino uint64 } type dirMtimeInfo struct { dstPath *string stat *syscall.Stat_t } // DirCopy copies or hardlinks the contents of one directory to another, // properly handling xattrs, and soft links // // Copying xattrs can be opted out of by passing false for copyXattrs. 
func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { copyWithFileRange := true copyWithFileClone := true // This is a map of source file inodes to dst file paths copiedFiles := make(map[fileID]string) dirsToSetMtimes := list.New() err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path relPath, err := filepath.Rel(srcDir, srcPath) if err != nil { return err } dstPath := filepath.Join(dstDir, relPath) stat, ok := f.Sys().(*syscall.Stat_t) if !ok { return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) } isHardlink := false switch mode := f.Mode(); { case mode.IsRegular(): // the type is 32bit on mips id := fileID{dev: uint64(stat.Dev), ino: stat.Ino} //nolint: unconvert if copyMode == Hardlink { isHardlink = true if err2 := os.Link(srcPath, dstPath); err2 != nil { return err2 } } else if hardLinkDstPath, ok := copiedFiles[id]; ok { if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil { return err2 } } else { if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil { return err2 } copiedFiles[id] = dstPath } case mode.IsDir(): if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { return err } case mode&os.ModeSymlink != 0: link, err := os.Readlink(srcPath) if err != nil { return err } if err := os.Symlink(link, dstPath); err != nil { return err } case mode&os.ModeNamedPipe != 0: fallthrough case mode&os.ModeSocket != 0: if err := unix.Mkfifo(dstPath, stat.Mode); err != nil { return err } case mode&os.ModeDevice != 0: if userns.RunningInUserNS() { // cannot create a device if running in user namespace return nil } if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { return err } default: return fmt.Errorf("unknown file type (%d / %s) for %s", f.Mode(), f.Mode().String(), srcPath) } // Everything below is copying metadata from src to dst. All this metadata // already shares an inode for hardlinks. if isHardlink { return nil } if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { return err } if copyXattrs { if err := doCopyXattrs(srcPath, dstPath); err != nil { return err } } isSymlink := f.Mode()&os.ModeSymlink != 0 // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if !isSymlink { if err := os.Chmod(dstPath, f.Mode()); err != nil { return err } } // system.Chtimes doesn't support a NOFOLLOW flag atm //nolint: unconvert if f.IsDir() { dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) } else if !isSymlink { aTime := time.Unix(stat.Atim.Unix()) mTime := time.Unix(stat.Mtim.Unix()) if err := system.Chtimes(dstPath, aTime, mTime); err != nil { return err } } else { ts := []syscall.Timespec{stat.Atim, stat.Mtim} if err := system.LUtimesNano(dstPath, ts); err != nil { return err } } return nil }) if err != nil { return err } for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() { mtimeInfo := e.Value.(*dirMtimeInfo) ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim} if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil { return err } } return nil } func doCopyXattrs(srcPath, dstPath string) error { if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { return err } // We need to copy this attribute if it appears in an overlay upper layer, as // this function is used to copy those. 
It is set by overlay if a directory // is removed and then re-created and should not inherit anything from the // same dir in the lower dir. return copyXattr(srcPath, dstPath, "trusted.overlay.opaque") }
//go:build linux // +build linux package copy // import "github.com/docker/docker/daemon/graphdriver/copy" import ( "container/list" "fmt" "io" "os" "path/filepath" "syscall" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "golang.org/x/sys/unix" ) // Mode indicates whether to use hardlink or copy content type Mode int const ( // Content creates a new file, and copies the content of the file Content Mode = iota // Hardlink creates a new hardlink to the existing file Hardlink ) func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { srcFile, err := os.Open(srcPath) if err != nil { return err } defer srcFile.Close() // If the destination file already exists, we shouldn't blow it away dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode()) if err != nil { return err } defer dstFile.Close() if *copyWithFileClone { err = unix.IoctlFileClone(int(dstFile.Fd()), int(srcFile.Fd())) if err == nil { return nil } *copyWithFileClone = false if err == unix.EXDEV { *copyWithFileRange = false } } if *copyWithFileRange { err = doCopyWithFileRange(srcFile, dstFile, fileinfo) // Trying the file_clone may not have caught the exdev case // as the ioctl may not have been available (therefore EINVAL) if err == unix.EXDEV || err == unix.ENOSYS { *copyWithFileRange = false } else { return err } } return legacyCopy(srcFile, dstFile) } func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error { amountLeftToCopy := fileinfo.Size() for amountLeftToCopy > 0 { n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0) if err != nil { return err } amountLeftToCopy = amountLeftToCopy - int64(n) } return nil } func legacyCopy(srcFile io.Reader, dstFile io.Writer) error { _, err := pools.Copy(dstFile, srcFile) return err } func copyXattr(srcPath, dstPath, attr string) error { data, err := system.Lgetxattr(srcPath, attr) if err != nil { return err } if data != nil { if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { return err } } return nil } type fileID struct { dev uint64 ino uint64 } type dirMtimeInfo struct { dstPath *string stat *syscall.Stat_t } // DirCopy copies or hardlinks the contents of one directory to another, properly // handling soft links, "security.capability" and (optionally) "trusted.overlay.opaque" // xattrs. // // The copyOpaqueXattrs controls if "trusted.overlay.opaque" xattrs are copied. // Passing false disables copying "trusted.overlay.opaque" xattrs. 
func DirCopy(srcDir, dstDir string, copyMode Mode, copyOpaqueXattrs bool) error { copyWithFileRange := true copyWithFileClone := true // This is a map of source file inodes to dst file paths copiedFiles := make(map[fileID]string) dirsToSetMtimes := list.New() err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path relPath, err := filepath.Rel(srcDir, srcPath) if err != nil { return err } dstPath := filepath.Join(dstDir, relPath) stat, ok := f.Sys().(*syscall.Stat_t) if !ok { return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) } isHardlink := false switch mode := f.Mode(); { case mode.IsRegular(): // the type is 32bit on mips id := fileID{dev: uint64(stat.Dev), ino: stat.Ino} //nolint: unconvert if copyMode == Hardlink { isHardlink = true if err2 := os.Link(srcPath, dstPath); err2 != nil { return err2 } } else if hardLinkDstPath, ok := copiedFiles[id]; ok { if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil { return err2 } } else { if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil { return err2 } copiedFiles[id] = dstPath } case mode.IsDir(): if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { return err } case mode&os.ModeSymlink != 0: link, err := os.Readlink(srcPath) if err != nil { return err } if err := os.Symlink(link, dstPath); err != nil { return err } case mode&os.ModeNamedPipe != 0: fallthrough case mode&os.ModeSocket != 0: if err := unix.Mkfifo(dstPath, stat.Mode); err != nil { return err } case mode&os.ModeDevice != 0: if userns.RunningInUserNS() { // cannot create a device if running in user namespace return nil } if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { return err } default: return fmt.Errorf("unknown file type (%d / %s) for %s", f.Mode(), f.Mode().String(), srcPath) } // Everything below is copying metadata from src to dst. All this metadata // already shares an inode for hardlinks. if isHardlink { return nil } if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { return err } if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { return err } if copyOpaqueXattrs { if err := doCopyXattrs(srcPath, dstPath); err != nil { return err } } isSymlink := f.Mode()&os.ModeSymlink != 0 // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if !isSymlink { if err := os.Chmod(dstPath, f.Mode()); err != nil { return err } } // system.Chtimes doesn't support a NOFOLLOW flag atm //nolint: unconvert if f.IsDir() { dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) } else if !isSymlink { aTime := time.Unix(stat.Atim.Unix()) mTime := time.Unix(stat.Mtim.Unix()) if err := system.Chtimes(dstPath, aTime, mTime); err != nil { return err } } else { ts := []syscall.Timespec{stat.Atim, stat.Mtim} if err := system.LUtimesNano(dstPath, ts); err != nil { return err } } return nil }) if err != nil { return err } for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() { mtimeInfo := e.Value.(*dirMtimeInfo) ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim} if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil { return err } } return nil } func doCopyXattrs(srcPath, dstPath string) error { // We need to copy this attribute if it appears in an overlay upper layer, as // this function is used to copy those. 
It is set by overlay if a directory // is removed and then re-created and should not inherit anything from the // same dir in the lower dir. return copyXattr(srcPath, dstPath, "trusted.overlay.opaque") }
abdulrahimiliasu
2bfc7aedab34758e62ea102acaa2cdca712042c8
9772c68e54eb93d8396f79e0a92d0e2d58d61668
Changes in this file are irrelevant to the issue. Please consider removing.
szubersk
4,429
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** As a matter of fact, there are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. So we should probably support zstd algorithms with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows, the magic number of Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Its design is pretty straightforward, with the sole objective to allow the decoder to quickly skip over user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: This is the size `n` of the following UserData, 4 bytes, little-endian format, unsigned 32-bits. **- How I did it** In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompression`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
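To make the frame-format discussion above concrete, here is a minimal sketch of a detector that accepts both a standard Zstandard frame and a skippable frame, using only the magic numbers quoted in the description. It is an illustration under those assumptions, not the actual `DetectCompression` change in `pkg/archive/archive.go` (shown in the file content below); the function name `isZstd` is hypothetical, and a full implementation would additionally skip over the user data of a skippable frame and inspect what follows.

```go
// Sketch: recognize zstd input by its frame magic numbers.
package main

import (
	"bytes"
	"fmt"
)

// Standard Zstandard frame magic 0xFD2FB528, little-endian on the wire.
var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

// isZstd reports whether source begins with a Zstandard frame or a skippable
// frame (magic 0x184D2A50..0x184D2A5F, also little-endian on the wire).
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true
	}
	// Skippable frame: only the low nibble of the first byte varies
	// (0x50..0x5F); the next four bytes are the little-endian user-data size.
	if len(source) >= 4 &&
		source[0]&0xf0 == 0x50 && source[1] == 0x2a &&
		source[2] == 0x4d && source[3] == 0x18 {
		return true
	}
	return false
}

func main() {
	fmt.Println(isZstd([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x00}))                     // true: Zstandard frame
	fmt.Println(isZstd([]byte{0x50, 0x2a, 0x4d, 0x18, 0x04, 0x00, 0x00, 0x00}))   // true: skippable frame
	fmt.Println(isZstd([]byte{0x1f, 0x8b, 0x08}))                                  // false: gzip
}
```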
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Wondering if this is something to contribute to github.com/klauspost/compress/zstd (so that other consumers of that package can also implement the same detection)
thaJeztah
4,430
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]>

**- What I did**

Zstandard defines two frame formats: Zstandard frames and skippable frames, so detection should also accept zstd streams that begin with a skippable frame. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.

The structure of a single Zstandard frame is as follows; its magic number is 0xFD2FB528.

```
+--------------------+------------+
| Magic_Number       | 4 bytes    |
+--------------------+------------+
| Frame_Header       | 2-14 bytes |
+--------------------+------------+
| Data_Block         | n bytes    |
+--------------------+------------+
| [More Data Blocks] |            |
+--------------------+------------+
| [Content Checksum] | 0-4 bytes  |
+--------------------+------------+
```

Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Their design is straightforward, with the sole objective of letting the decoder quickly skip over the user-defined data and continue decoding.

```
+--------------+------------+-----------+
| Magic_Number | Frame_Size | User_Data |
+--------------+------------+-----------+
| 4 bytes      | 4 bytes    | n bytes   |
+--------------+------------+-----------+
```

Magic_Number: 0x184D2A5?, meaning any value from 0x184D2A50 to 0x184D2A5F.
Frame_Size: the size `n` of the following User_Data, 4 bytes, little-endian format, unsigned 32-bit.

**- How I did it**

To accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function.

**- How to verify it**

To verify this PR, I created a new test named `TestDetectCompression`.

**- Description for the changelog**

**- A picture of a cute animal (not mandatory but encouraged)**

<img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
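The sketch below is a minimal, self-contained illustration of the frame detection the description above explains. It mirrors the `zstdMatcher` logic this PR adds to `DetectCompression` (same constants and mask), but the `isZstd` helper, the `main` function, and the sample inputs are illustrative only and are not part of the moby codebase.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// zstdMagic is the magic number of a Zstandard frame (0xFD2FB528), as it
// appears on disk in little-endian byte order.
var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

const (
	zstdMagicSkippableStart = 0x184D2A50 // lowest skippable-frame magic (0x184D2A50..0x184D2A5F)
	zstdMagicSkippableMask  = 0xFFFFFFF0 // masks off the low nibble, which varies per skippable frame
)

// isZstd reports whether source starts with either a Zstandard frame or a
// skippable frame - the decision the changed DetectCompression has to make.
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true // regular Zstandard frame
	}
	// A skippable frame needs at least the 4-byte magic plus the 4-byte
	// little-endian Frame_Size field.
	if len(source) < 8 {
		return false
	}
	return binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart
}

func main() {
	// A skippable frame: magic 0x184D2A50 (little-endian on disk) followed by
	// a 4-byte Frame_Size of 4 and four bytes of user data.
	skippable := []byte{0x50, 0x2A, 0x4D, 0x18, 0x04, 0x00, 0x00, 0x00, 'd', 'a', 't', 'a'}
	fmt.Println(isZstd(skippable))                             // true
	fmt.Println(isZstd([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x00}))  // true (Zstandard frame)
	fmt.Println(isZstd([]byte("not zstd")))                    // false
}
```

The key design point is that only the low nibble of the skippable-frame magic varies (0x184D2A50 through 0x184D2A5F), so the check masks it off with 0xFFFFFFF0 instead of comparing against a fixed byte prefix.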
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
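// Editor's illustrative sketch (not part of the original moby source): a minimal
// example of how the exported DetectCompression and DecompressStream helpers
// defined above can be combined. The helper name readDecompressed is hypothetical
// and only relies on packages already imported by this file.
func readDecompressed(path string, w io.Writer) (Compression, error) {
	f, err := os.Open(path)
	if err != nil {
		return Uncompressed, err
	}
	defer f.Close()

	// Peek at the first few bytes to report which magic number, if any, matched.
	header := make([]byte, 10)
	n, _ := f.Read(header)
	compression := DetectCompression(header[:n])

	// Rewind and let DecompressStream pick the matching reader (or pass the
	// stream through untouched when no compression is detected).
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return compression, err
	}
	rdr, err := DecompressStream(f)
	if err != nil {
		return compression, err
	}
	defer rdr.Close()

	_, err = io.Copy(w, rdr)
	return compression, err
}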
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
I think that library is probably aimed at users who already know which compression algorithm they want to use; it is more concerned with implementing the specified algorithm, so detecting the compression algorithm is not its main function.
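To make that point concrete, detection by magic bytes is small enough for the caller to do itself. The sketch below is illustrative only (the helper name and map are assumptions, not part of any particular compression library):

```go
package main

import (
	"bytes"
	"fmt"
)

// Leading magic bytes for the formats discussed in this thread.
var magics = map[string][]byte{
	"bzip2": {0x42, 0x5A, 0x68},
	"gzip":  {0x1F, 0x8B, 0x08},
	"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	"zstd":  {0x28, 0xB5, 0x2F, 0xFD},
}

// detect returns the name of the matching algorithm, or "" if none match.
func detect(header []byte) string {
	for name, magic := range magics {
		if bytes.HasPrefix(header, magic) {
			return name
		}
	}
	return ""
}

func main() {
	fmt.Println(detect([]byte{0x1F, 0x8B, 0x08, 0x00})) // prints "gzip"
}
```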
dkkb
4,431
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** As a matter of fact, there are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. So we should probably support zstd algorithms with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows, the magic number of Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Its design is pretty straightforward, with the sole objective to allow the decoder to quickly skip over user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: This is the size `n` of the following UserData, 4 bytes, little-endian format, unsigned 32-bits. **- How I did it** In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompression`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
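For reference, a minimal sketch of the skippable-frame check described above. The constants come from the quoted zstd spec (Zstandard frame magic 0xFD2FB528, skippable-frame magic 0x184D2A50–0x184D2A5F, little-endian); the function name is illustrative and not the exact code in the patch:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	zstdMagicSkippableStart = 0x184D2A50
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

// Little-endian encoding of the Zstandard frame magic 0xFD2FB528.
var zstdMagic = []byte{0x28, 0xB5, 0x2F, 0xFD}

// isZstd reports whether source starts with a Zstandard frame or a
// skippable frame (any magic from 0x184D2A50 to 0x184D2A5F).
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true // Zstandard frame
	}
	// A skippable frame header is magic (4 bytes) + frame size (4 bytes).
	if len(source) < 8 {
		return false
	}
	magic := binary.LittleEndian.Uint32(source[:4])
	return magic&zstdMagicSkippableMask == zstdMagicSkippableStart
}

func main() {
	// Skippable frame: magic 0x184D2A50, frame size 4, then user data would follow.
	fmt.Println(isZstd([]byte{0x50, 0x2A, 0x4D, 0x18, 0x04, 0x00, 0x00, 0x00})) // true
	// Plain gzip header is not zstd.
	fmt.Println(isZstd([]byte{0x1F, 0x8B, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00})) // false
}
```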
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
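As a minimal usage sketch (from a hypothetical consumer of this package, with a made-up file name), the detection above is normally exercised indirectly through `DecompressStream`, which peeks at the magic bytes and wraps the reader before it is handed to `archive/tar`:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// "layer.tar.zst" is a hypothetical path; any gzip/bzip2/xz/zstd-compressed tarball works.
	f, err := os.Open("layer.tar.zst")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// DecompressStream sniffs the magic bytes and returns a decompressing reader.
	rdr, err := archive.DecompressStream(f)
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	// List the entries of the decompressed tar stream.
	tr := tar.NewReader(rdr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name)
	}
}
```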
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Yeah, my train of thought here is that;

- I know of multiple runtimes / container-related projects that will likely (?) have to implement this detection (buildkit, containerd, moby, cri-o?)
- Afaik, most (if not all) of those currently use the klauspost module
- The klauspost module also has definitions for these magic numbers, and (from a quick look) appears to have an implementation to detect them; https://github.com/klauspost/compress/blob/f118b5f6f7e720b1f1c1464bb11904261c06f2f3/zstd/framedec.go#L57-L60 https://github.com/klauspost/compress/blob/baa1f1e42d738c71ea4a18b673ebacee0ce520b8/zstd/decodeheader.go#L79

So _perhaps_ it could be within scope of that project to have each compression format export a "detection" function (and possibly a utility package that allows detecting the compression for all of them, but that could be left to consumers). Of course, an alternative could be to have a small module somewhere that can be used by the other projects mentioned, but having it in the klauspost project would help make sure the detection doesn't diverge. @klauspost @giuseppe @tonistiigi any thoughts? (it's just an idea; not a show-stopper)
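For illustration only, a stand-alone matcher along the lines this comment suggests might look like the sketch below. The package and function names are hypothetical (this is not klauspost's actual API), and the constants simply mirror the zstdMatcher logic already shown in this row's archive.go:

```go
// Package sniff is a hypothetical home for shared compression-detection helpers.
package sniff

import (
	"bytes"
	"encoding/binary"
)

var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

const (
	zstdMagicSkippableStart = 0x184D2A50
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

// IsZstd reports whether source starts with a Zstandard frame or a skippable frame.
func IsZstd(source []byte) bool {
	// Regular Zstandard frame: fixed 4-byte magic number.
	if bytes.HasPrefix(source, zstdMagic) {
		return true
	}
	// Skippable frame: magic is any value from 0x184D2A50 to 0x184D2A5F,
	// stored little-endian, followed by a 4-byte Frame_Size.
	if len(source) < 8 {
		return false
	}
	return binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart
}
```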
thaJeztah
4,432
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** As a matter of fact, there are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. So we should probably support zstd algorithms with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows, the magic number of Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Its design is pretty straightforward, with the sole objective to allow the decoder to quickly skip over user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: This is the size `n` of the following UserData, 4 bytes, little-endian format, unsigned 32-bits. **- How I did it** In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompression`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
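The description mentions a new `TestDetectCompression`; the actual test is not shown in this row, but a rough table-driven sketch of the idea (assuming the updated `DetectCompression` with skippable-frame support shown earlier) could look like this:

```go
package archive

import "testing"

// TestDetectCompressionZstd is a sketch, not the PR's actual test: it feeds
// DetectCompression the two frame layouts described above.
func TestDetectCompressionZstd(t *testing.T) {
	cases := map[string][]byte{
		// Magic_Number of a regular Zstandard frame (0xFD2FB528, little-endian on disk).
		"zstd frame": {0x28, 0xb5, 0x2f, 0xfd},
		// Skippable frame: magic 0x184D2A50 (little-endian) followed by a 4-byte Frame_Size.
		"skippable frame": {0x50, 0x2a, 0x4d, 0x18, 0x04, 0x00, 0x00, 0x00},
	}
	for name, src := range cases {
		if got := DetectCompression(src); got != Zstd {
			t.Errorf("%s: expected Zstd, got %v", name, got)
		}
	}
}
```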
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
fwiw there is already a [frame header decoder](https://pkg.go.dev/github.com/klauspost/compress/zstd#Header.Decode) function that lets you check whether the first [HeaderMaxSize = 17](https://pkg.go.dev/github.com/klauspost/compress/zstd#pkg-constants) bytes are a valid zstd (skippable) header.
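Following on from that comment, here is a minimal, hypothetical sketch of what such a check could look like: instead of comparing magic numbers by hand, peek at least `HeaderMaxSize` (17) bytes and let the library's frame header decoder decide whether they form a valid (possibly skippable) frame header. `zstd.Header`, its `Decode` method and `zstd.HeaderMaxSize` are taken from the pkg.go.dev pages linked above; the `isZstdFrameHeader` helper name and the wiring are assumptions, not part of the actual patch.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// isZstdFrameHeader reports whether the peeked bytes begin with a valid
// Zstandard frame header (normal or skippable). In DecompressStream this
// would mean peeking zstd.HeaderMaxSize (17) bytes rather than 10.
func isZstdFrameHeader(peeked []byte) bool {
	var hdr zstd.Header
	// Decode returns a non-nil error if the bytes cannot be parsed as a
	// Zstandard frame header.
	return hdr.Decode(peeked) == nil
}

func main() {
	// Build a real zstd stream so the example does not rely on
	// hand-crafted header bytes.
	var buf bytes.Buffer
	w, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	fmt.Println(isZstdFrameHeader(buf.Bytes()))                   // expected: true
	fmt.Println(isZstdFrameHeader([]byte("definitely not zstd"))) // expected: false
}
```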
klauspost
4,433
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** As a matter of fact, there are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. So we should probably support zstd algorithms with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows, the magic number of Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Its design is pretty straightforward, with the sole objective to allow the decoder to quickly skip over user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: This is the size `n` of the following UserData, 4 bytes, little-endian format, unsigned 32-bits. **- How I did it** In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompression`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
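To make the magic-number arithmetic in the description above concrete, here is a small standalone sketch of the skippable-frame check it describes: mask off the low nibble of the little-endian magic number and compare against 0x184D2A50, which covers the whole 0x184D2A50–0x184D2A5F range. It mirrors the `zstdMatcher` added in the new `pkg/archive/archive.go`, but the `isSkippableFrame` helper name is illustrative only.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	zstdMagicSkippableStart = 0x184D2A50
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

// isSkippableFrame reports whether source starts with a Zstandard
// skippable frame.
func isSkippableFrame(source []byte) bool {
	// Need at least the Magic_Number (4 bytes) and Frame_Size (4 bytes).
	if len(source) < 8 {
		return false
	}
	magic := binary.LittleEndian.Uint32(source[:4])
	return magic&zstdMagicSkippableMask == zstdMagicSkippableStart
}

func main() {
	// 0x184D2A53 encoded little-endian, followed by a zero Frame_Size.
	skippable := []byte{0x53, 0x2A, 0x4D, 0x18, 0x00, 0x00, 0x00, 0x00}
	// A normal Zstandard frame magic (0xFD2FB528) does not match the mask.
	normal := []byte{0x28, 0xB5, 0x2F, 0xFD, 0x00, 0x00, 0x00, 0x00}

	fmt.Println(isSkippableFrame(skippable)) // true
	fmt.Println(isSkippableFrame(normal))    // false
}
```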
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
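As a rough usage sketch of the pack/unpack helpers defined above (`TarWithOptions` and `Untar`), here is a minimal example; the paths, exclude pattern, and error handling are illustrative and not taken from the repository:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Pack /tmp/src into a gzip-compressed tar stream, skipping *.log files.
	rc, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"*.log"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// Untar detects the compression automatically and unpacks into /tmp/dst;
	// passing nil options makes it use the defaults.
	if err := archive.Untar(rc, "/tmp/dst", nil); err != nil {
		log.Fatal(err)
	}
}
```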
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Linter is failing on these consts and vars because they're missing a GoDoc comment. It looks like we're only using them internally in this package (at least currently), so I suggest un-exporting them:

```
pkg/archive/archive.go:129:2: exported const ZstdMagicSkippableStart should have comment (or a comment on this block) or be unexported (golint)
	ZstdMagicSkippableStart = 0x184D2A50
	^
pkg/archive/archive.go:134:2: exported var `Bzip2Magic` should have comment or be unexported (golint)
	Bzip2Magic = []byte{0x42, 0x5A, 0x68}
	^
```
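For reference, a minimal sketch of what un-exporting those identifiers would look like; the names mirror the linter output above, and the skippable-frame mask constant is taken from the updated `pkg/archive/archive.go` shown later in this record:

```go
// Magic numbers used by DetectCompression; unexported since they are only
// needed inside this package.
const (
	zstdMagicSkippableStart = 0x184D2A50
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

var (
	bzip2Magic = []byte{0x42, 0x5A, 0x68}
	gzipMagic  = []byte{0x1F, 0x8B, 0x08}
	xzMagic    = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}
	zstdMagic  = []byte{0x28, 0xb5, 0x2f, 0xfd}
)
```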
thaJeztah
4,434
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]>

<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md

** Make sure all your commits include a signature generated with `git commit -s` **

For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/

If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx"

Please provide the following information: -->

**- What I did**

Zstandard defines two frame formats: Zstandard frames and skippable frames. So we should also support zstd streams that begin with a skippable frame. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.

The structure of a single Zstandard frame is as follows; the magic number of a Zstandard frame is 0xFD2FB528.

```
+--------------------+------------+
| Magic_Number       | 4 bytes    |
+--------------------+------------+
| Frame_Header       | 2-14 bytes |
+--------------------+------------+
| Data_Block         | n bytes    |
+--------------------+------------+
| [More Data Blocks] |            |
+--------------------+------------+
| [Content Checksum] | 0-4 bytes  |
+--------------------+------------+
```

Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Their design is straightforward, with the sole objective of allowing the decoder to quickly skip over user-defined data and continue decoding.

```
+--------------+------------+-----------+
| Magic_Number | Frame_Size | User_Data |
+--------------+------------+-----------+
| 4 bytes      | 4 bytes    | n bytes   |
+--------------+------------+-----------+
```

Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F.
Frame_Size: the size `n` of the following User_Data, stored as an unsigned 32-bit value in little-endian format (4 bytes).

**- How I did it**

To accommodate more complicated detectors for zstd, I changed the `DetectCompression` function.

**- How to verify it**

To verify this PR, I created a new test named `TestDetectCompression`.

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

**- A picture of a cute animal (not mandatory but encouraged)**

<img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
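To make the detection rule concrete, below is a small, self-contained sketch of the two checks described in the PR description. The `isZstd` helper name is illustrative; the mask-based range check and the 8-byte minimum (Magic_Number plus Frame_Size) follow from the frame layouts shown in the tables above:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	zstdMagicSkippableStart = 0x184D2A50 // lowest skippable-frame magic number
	zstdMagicSkippableMask  = 0xFFFFFFF0 // ignore the low nibble: 0x184D2A50..0x184D2A5F
)

// Zstandard frame magic 0xFD2FB528, stored little-endian on the wire.
var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

// isZstd reports whether source starts with either a Zstandard frame or a skippable frame.
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true // regular Zstandard frame
	}
	// A skippable frame needs at least Magic_Number (4 bytes) + Frame_Size (4 bytes).
	if len(source) < 8 {
		return false
	}
	magic := binary.LittleEndian.Uint32(source[:4])
	return magic&zstdMagicSkippableMask == zstdMagicSkippableStart
}

func main() {
	fmt.Println(isZstd([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x00, 0x00, 0x00}))             // true: Zstandard frame
	fmt.Println(isZstd([]byte{0x5A, 0x2A, 0x4D, 0x18, 0x04, 0x00, 0x00, 0x00, 1, 2, 3, 4})) // true: skippable frame 0x184D2A5A
	fmt.Println(isZstd([]byte{0x1F, 0x8B, 0x08}))                                           // false: gzip
}
```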
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
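For context, a minimal test-style sketch of how the `DetectCompression` shown in the file above would be expected to classify both Zstandard frame variants. This is not part of the record: it assumes the snippet is compiled inside the same `archive` package, and the byte values are illustrative only, derived from the magic numbers used in the code (the standard-frame magic and a little-endian value in the skippable range plus a 4-byte frame size).

```go
package archive

import "testing"

// Sketch: both a standard Zstandard frame and a skippable frame should be
// reported as Zstd by DetectCompression as defined above.
func TestDetectZstdSketch(t *testing.T) {
	// Standard frame: starts with the 0x28 0xb5 0x2f 0xfd magic.
	standardFrame := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x00, 0x00, 0x00}
	// Skippable frame: little-endian magic in 0x184D2A50..0x184D2A5F, then a 4-byte frame size.
	skippableFrame := []byte{0x50, 0x2A, 0x4D, 0x18, 0x04, 0x00, 0x00, 0x00}

	if got := DetectCompression(standardFrame); got != Zstd {
		t.Errorf("standard frame: expected Zstd, got %v", got)
	}
	if got := DetectCompression(skippableFrame); got != Zstd {
		t.Errorf("skippable frame: expected Zstd, got %v", got)
	}
}
```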
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Linting failure here, and it does indeed look like the `else` is redundant here; ``` pkg/archive/archive.go:156:9: `if` block ends with a `return` statement, so drop this `else` and outdent its block (golint) } else { ```
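The lint rule flagged above is purely structural; a minimal sketch of the shape it objects to and the recommended rewrite, using hypothetical helper names that mirror the zstd check rather than the exact code from the PR:

```go
package archive

import (
	"bytes"
	"encoding/binary"
)

// Shape that triggers the lint: the first branch returns, so the else only adds nesting.
func isZstdWithElse(source []byte) bool {
	if bytes.HasPrefix(source, []byte{0x28, 0xb5, 0x2f, 0xfd}) {
		return true // Zstandard frame
	} else {
		if len(source) < 8 {
			return false
		}
		// Skippable frame: magic in the 0x184D2A50..0x184D2A5F range.
		return binary.LittleEndian.Uint32(source[:4])&0xFFFFFFF0 == 0x184D2A50
	}
}

// Recommended shape: drop the else and outdent its block.
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, []byte{0x28, 0xb5, 0x2f, 0xfd}) {
		return true // Zstandard frame
	}
	if len(source) < 8 {
		return false
	}
	// Skippable frame: magic in the 0x184D2A50..0x184D2A5F range.
	return binary.LittleEndian.Uint32(source[:4])&0xFFFFFFF0 == 0x184D2A50
}
```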
thaJeztah
4,435
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** As a matter of fact, there are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. So we should probably support zstd algorithms with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows, the magic number of Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Its design is pretty straightforward, with the sole objective to allow the decoder to quickly skip over user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: This is the size `n` of the following UserData, 4 bytes, little-endian format, unsigned 32-bits. **- How I did it** In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompression`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
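The skippable-frame layout described above maps directly onto a few lines of code. A minimal, self-contained sketch (the helper name and usage are hypothetical, not taken from the PR) that builds the 8-byte header the new detector has to recognize:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// buildSkippableFrame assembles a skippable frame as described in the spec:
// a 4-byte magic number, a 4-byte little-endian frame size, then the user data.
func buildSkippableFrame(userData []byte) []byte {
	frame := make([]byte, 8+len(userData))
	// Any magic from 0x184D2A50 to 0x184D2A5F marks a skippable frame.
	binary.LittleEndian.PutUint32(frame[0:4], 0x184D2A50)
	// Frame_Size is the length n of the user data that follows.
	binary.LittleEndian.PutUint32(frame[4:8], uint32(len(userData)))
	copy(frame[8:], userData)
	return frame
}

func main() {
	frame := buildSkippableFrame([]byte("docker metadata"))
	fmt.Printf("% x\n", frame[:8]) // 50 2a 4d 18 0f 00 00 00
}
```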
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}
	archive.closed = true
	return archive.File.Close()
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}
	archive.closed = true
	return archive.File.Close()
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Given that we don't use `ZstDetector` outside of this package, could you un-export it? (It's easy to export it if we ever need it outside of this package, but the reverse can be more work if some external project started using it, so I prefer to be a bit conservative about exporting things 😅)
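For illustration, a minimal sketch of the package-private shape the reviewer is asking for: the detection helpers stay unexported and only `DetectCompression` is public. The identifiers below mirror the merged code shown in the updated file earlier in this record (`matcher`, `zstdMatcher`) rather than the earlier exported `ZstDetector`; the skippable-frame branch is omitted to keep the sketch short.

```go
package archive

import "bytes"

// matcher is deliberately unexported: packages outside pkg/archive only
// ever call DetectCompression, never an individual detector.
type matcher = func([]byte) bool

// zstdMatcher reports whether the input starts with the plain Zstandard
// frame magic (0x28 0xB5 0x2F 0xFD); the skippable-frame case is left out
// of this sketch.
func zstdMatcher() matcher {
	zstdMagic := []byte{0x28, 0xb5, 0x2f, 0xfd}
	return func(source []byte) bool {
		return bytes.HasPrefix(source, zstdMagic)
	}
}
```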
thaJeztah
4,436
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]>

<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md

** Make sure all your commits include a signature generated with `git commit -s` **

For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/

If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx"

Please provide the following information: -->

**- What I did**

There are two frame formats defined by Zstandard: Zstandard frames and skippable frames, so we should also detect zstd streams that begin with a skippable frame. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.

The structure of a single Zstandard frame is as follows; the magic number of a Zstandard frame is 0xFD2FB528.

```
+--------------------+------------+
| Magic_Number       | 4 bytes    |
+--------------------+------------+
| Frame_Header       | 2-14 bytes |
+--------------------+------------+
| Data_Block         | n bytes    |
+--------------------+------------+
| [More Data Blocks] |            |
+--------------------+------------+
| [Content Checksum] | 0-4 bytes  |
+--------------------+------------+
```

Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Their design is straightforward, with the sole objective of allowing the decoder to quickly skip over the user-defined data and continue decoding.

```
+--------------+------------+-----------+
| Magic_Number | Frame_Size | User_Data |
+--------------+------------+-----------+
| 4 bytes      | 4 bytes    | n bytes   |
+--------------+------------+-----------+
```

Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F.
Frame_Size: the size `n` of the following User_Data, 4 bytes, little-endian format, unsigned 32-bit.

**- How I did it**

In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function.

**- How to verify it**

To verify this PR, I created a new test named `TestDetectCompression`.

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

**- A picture of a cute animal (not mandatory but encouraged)**

<img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
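To make the skippable-frame rule above concrete, here is a minimal, self-contained Go sketch of the masked magic-number check. It is an illustration derived from the description (the constants match the `zstdMagicSkippable*` values in the updated file earlier in this record); the helper name `isZstdSkippableFrame` and the `main` demo are mine, not the PR's exact code.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	zstdMagicSkippableStart = 0x184D2A50 // lowest skippable-frame magic (0x184D2A5?)
	zstdMagicSkippableMask  = 0xFFFFFFF0 // ignore the low nibble, so 0x184D2A50..0x184D2A5F all match
)

// isZstdSkippableFrame reports whether b starts with a Zstandard skippable
// frame: a 4-byte little-endian magic in the 0x184D2A5? range, followed by
// a 4-byte little-endian Frame_Size and Frame_Size bytes of user data.
func isZstdSkippableFrame(b []byte) bool {
	if len(b) < 8 { // magic (4 bytes) + Frame_Size (4 bytes)
		return false
	}
	magic := binary.LittleEndian.Uint32(b[:4])
	return magic&zstdMagicSkippableMask == zstdMagicSkippableStart
}

func main() {
	// A skippable frame with magic 0x184D2A50 and a zero-length user-data field.
	frame := []byte{0x50, 0x2A, 0x4D, 0x18, 0x00, 0x00, 0x00, 0x00}
	fmt.Println(isZstdSkippableFrame(frame)) // true
}
```

The merged change performs the equivalent masked comparison inside its unexported zstd matcher, after first checking for the plain Zstandard frame magic `0x28 0xB5 0x2F 0xFD`.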
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Done.
dkkb
4,437
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** As a matter of fact, there are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. So we should probably support zstd algorithms with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows, the magic number of Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Its design is pretty straightforward, with the sole objective to allow the decoder to quickly skip over user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: This is the size `n` of the following UserData, 4 bytes, little-endian format, unsigned 32-bits. **- How I did it** In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompression`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
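The frame layouts in the description above reduce to two byte-level checks: a fixed 4-byte magic (0xFD2FB528, stored little-endian as 28 B5 2F FD) for a regular Zstandard frame, and a masked range check (any magic from 0x184D2A50 to 0x184D2A5F) plus an 8-byte minimum header for a skippable frame. Below is a minimal, self-contained Go sketch of that detection logic; it mirrors the magic numbers given above but is not the PR's code verbatim, and the names (`isZstd`, `zstdFrameMagic`, `zstdSkippableStart`, `zstdSkippableMask`) are illustrative.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// Magic prefixes from the frame layouts described above. The skippable-frame
// magic is a range (0x184D2A50..0x184D2A5F), so the low nibble is masked off
// before comparing.
var zstdFrameMagic = []byte{0x28, 0xB5, 0x2F, 0xFD}

const (
	zstdSkippableStart = 0x184D2A50
	zstdSkippableMask  = 0xFFFFFFF0
)

// isZstd reports whether source starts with either a Zstandard frame or a
// skippable frame. The magic number is stored little-endian, so the range
// check is done on the decoded uint32 rather than on the raw bytes.
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdFrameMagic) {
		return true // regular Zstandard frame
	}
	// A skippable frame header is at least 8 bytes: 4-byte magic + 4-byte size.
	if len(source) < 8 {
		return false
	}
	magic := binary.LittleEndian.Uint32(source[:4])
	return magic&zstdSkippableMask == zstdSkippableStart
}

func main() {
	// 0x184D2A50 little-endian, followed by a 4-byte user-data length of 0.
	skippable := []byte{0x50, 0x2A, 0x4D, 0x18, 0x00, 0x00, 0x00, 0x00}
	regular := []byte{0x28, 0xB5, 0x2F, 0xFD, 0x00, 0x00, 0x00, 0x00}
	fmt.Println(isZstd(skippable)) // true
	fmt.Println(isZstd(regular))   // true
	fmt.Println(isZstd([]byte("plain tar data"))) // false
}
```

Detection only needs the magic; a decoder that wanted to step over a skippable frame would additionally read the 4-byte little-endian Frame_Size field and skip that many bytes of user data. Note also why the masked range check matters: a single fixed-prefix comparison (as in a prefix-to-compression map) cannot express "any of the 16 values 0x184D2A50–0x184D2A5F", which is what motivates moving to per-compression matcher functions.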
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Done.
dkkb
4,438
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** As a matter of fact, there are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. So we should probably support zstd algorithms with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows, the magic number of Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Its design is pretty straightforward, with the sole objective to allow the decoder to quickly skip over user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: This is the size `n` of the following UserData, 4 bytes, little-endian format, unsigned 32-bits. **- How I did it** In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompression`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
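Below is a minimal, self-contained sketch of the two-format check that the description above lays out. It is illustrative only: the constant and function names are chosen for this example rather than taken from `pkg/archive`, but the magic numbers and the little-endian masked comparison mirror the values given in the tables above.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	zstdMagicSkippableStart = 0x184D2A50 // lowest skippable-frame magic (0x184D2A50..0x184D2A5F)
	zstdMagicSkippableMask  = 0xFFFFFFF0 // masks off the low nibble so all sixteen values match
)

// zstdMagic is the little-endian encoding of the Zstandard frame magic 0xFD2FB528.
var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

// isZstd reports whether source starts with either a regular Zstandard frame
// or a skippable frame, following the two frame formats described above.
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true // regular Zstandard frame
	}
	// A skippable frame needs at least Magic_Number (4 bytes) + Frame_Size (4 bytes).
	if len(source) < 8 {
		return false
	}
	// Any magic from 0x184D2A50 to 0x184D2A5F marks a skippable frame.
	return binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart
}

func main() {
	zstdFrame := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x00, 0x00, 0x00}
	skippable := []byte{0x5a, 0x2a, 0x4d, 0x18, 0x04, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef} // magic 0x184D2A5A
	plain := []byte("not compressed at all")

	fmt.Println(isZstd(zstdFrame)) // true
	fmt.Println(isZstd(skippable)) // true
	fmt.Println(isZstd(plain))     // false
}
```

Masking off the low nibble of the first four bytes lets a single comparison accept every skippable-frame magic from 0x184D2A50 through 0x184D2A5F, which is the same approach the reworked `DetectCompression`/`zstdMatcher` in the updated `pkg/archive/archive.go` relies on.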
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if bytes.HasPrefix(source, m) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. // See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. 
hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "encoding/binary" "fmt" "io" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping *idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. type breakoutError error const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz // Zstd is zstd compression algorithm. Zstd ) const ( // AUFSWhiteoutFormat is the default format for whiteouts AUFSWhiteoutFormat WhiteoutFormat = iota // OverlayWhiteoutFormat formats whiteout according to the overlay // standard. OverlayWhiteoutFormat ) const ( modeISDIR = 040000 // Directory modeISFIFO = 010000 // FIFO modeISREG = 0100000 // Regular file modeISLNK = 0120000 // Symbolic link modeISBLK = 060000 // Block special file modeISCHR = 020000 // Character special file modeISSOCK = 0140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. // Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return Uncompressed } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { logrus.Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } logrus.Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { return ioutils.NewReadCloserWrapper(readBuf, func() error { cancel() return readBuf.Close() }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if err == io.EOF { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if _, err := pools.Copy(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" } return "" } // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } return hdr, nil } // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar // https://github.com/golang/go/commit/66b5a2f func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { fm := fi.Mode() switch { case fm.IsRegular(): mode |= modeISREG case fi.IsDir(): mode |= modeISDIR case fm&os.ModeSymlink != 0: mode |= modeISLNK case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { mode |= modeISCHR } else { mode |= modeISBLK } case fm&os.ModeNamedPipe != 0: mode |= modeISFIFO case fm&os.ModeSocket != 0: mode |= modeISSOCK } return mode } // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 length = xattrCapsSz2 } hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability[:length]) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping *idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) string { name = CanonicalTarNameForPath(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use system.OpenSequential to ensure we use sequential file // access on Windows to avoid depleting the standby list. // On Linux, this equates to a regular os.Open. file, err := system.OpenSequential(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file. We use system.OpenFileSequential to use sequential // file access to avoid depleting the standby list on Windows. // On Linux, this equates to a regular os.OpenFile file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } return err } } if len(errors) > 0 { logrus.WithFields(logrus.Fields{ "errors": errors, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. 
aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return nil, err } go func() { ta := newTarAppender( idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), compressWriter, options.ChownOpts, ) ta.WhiteoutConverter = whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Errorf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Errorf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Errorf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] var ( parentMatched []bool parentDirs []string ) walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." 
&& f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatched = parentMatched[:len(parentMatched)-1] } if len(parentMatched) != 0 { skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1]) } else { skip, err = pm.MatchesOrParentMatches(relFilePath) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatched = append(parentMatched, skip) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err } } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err } // Iterate through the files in the archive. 
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := remapIDs(idMapping, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() options := &TarOptions{ UIDMaps: archiver.IDMapping.UIDs(), GIDMaps: archiver.IDMapping.GIDs(), } return archiver.Untar(archive, dst, options) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(done) }() return ioutils.NewReadCloserWrapper(pipeR, func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := pipeR.Close() <-done return err }), nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err }
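As a side note on the API shown above: `NewTempArchive` stages a reader into a temporary file, and `TempArchive` closes and removes that file once its full content has been read. The following is a minimal usage sketch — it is not part of any PR in this dataset and only relies on the exported names visible above; the exact read pattern (reading exactly `Size` bytes with `io.ReadFull`) is an assumption made for the example:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Stage arbitrary content into a self-deleting temporary archive file
	// (an empty dir argument falls back to the default temp directory).
	tmp, err := archive.NewTempArchive(strings.NewReader("hello"), "")
	if err != nil {
		panic(err)
	}
	// Read exactly Size bytes; once the whole content has been consumed the
	// backing file is closed and removed automatically by TempArchive.Read.
	buf := make([]byte, tmp.Size)
	if _, err := io.ReadFull(tmp, buf); err != nil {
		panic(err)
	}
	fmt.Printf("read %q\n", buf)
}
```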
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Done.
dkkb
4,439
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> **- What I did** Zstandard defines two frame formats: Zstandard frames and skippable frames, so the zstd detector should also handle streams that start with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows; the magic number of a Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Their design is straightforward, with the sole objective of letting the decoder quickly skip over the user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, i.e. any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: the size `n` of the following User_Data, 4 bytes, little-endian, unsigned 32-bit. **- How I did it** To accommodate the more involved detection logic for Zstd, I changed the `DetectCompression` function. **- How to verify it** This PR adds a new test, `TestDetectCompressionZstd`. **- Description for the changelog** **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
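The frame layouts described above lend themselves to a small frame-walking check: skip over any leading skippable frames, then look for the Zstandard frame magic number. The following is a minimal, self-contained Go sketch of that idea — an illustration written for this description, not the actual change made to `DetectCompression`; the function name `isZstd` and the constant names are assumptions for the example:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} // 0xFD2FB528, little-endian

const (
	zstdMagicSkippableStart = 0x184D2A50 // low nibble may be 0x0..0xF
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

// isZstd reports whether source starts with a Zstandard frame, optionally
// preceded by one or more skippable frames.
func isZstd(source []byte) bool {
	for {
		if bytes.HasPrefix(source, zstdMagic) {
			return true // found a Zstandard frame
		}
		if len(source) < 8 {
			return false // too short to hold a skippable-frame header
		}
		magic := binary.LittleEndian.Uint32(source[:4])
		if magic&zstdMagicSkippableMask != zstdMagicSkippableStart {
			return false // neither a Zstandard nor a skippable frame
		}
		// Frame_Size: 4 bytes, little-endian, length of the User_Data.
		frameSize := binary.LittleEndian.Uint32(source[4:8])
		if uint64(len(source)) < 8+uint64(frameSize) {
			return false // truncated skippable frame
		}
		source = source[8+int(frameSize):] // skip the whole skippable frame
	}
}

func main() {
	data := []byte{
		0x50, 0x2a, 0x4d, 0x18, // skippable frame magic (0x184D2A50)
		0x04, 0x00, 0x00, 0x00, // Frame_Size: 4 bytes of user data follow
		0x5d, 0x00, 0x00, 0x00, // User_Data
		0x28, 0xb5, 0x2f, 0xfd, // Zstandard frame magic (0xFD2FB528)
	}
	fmt.Println(isZstd(data)) // true
}
```

The byte sequence in `main` mirrors the one used by `TestDetectCompressionZstd` in the test file below, so the sketch can be checked against the same fixture.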
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive_test.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { 
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if 
err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestDetectCompressionZstd(t *testing.T) { // test zstd compression without skippable frames. compressedData := []byte{ 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression := DetectCompression(compressedData) if compression != Zstd { t.Fatal("Unexpected compression") } // test zstd compression with skippable frames. 
hex := []byte{ 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F 0x04, 0x00, 0x00, 0x00, // frame size 0x5d, 0x00, 0x00, 0x00, // user data 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression = DetectCompression(hex) if compression != Zstd { t.Fatal("Unexpected compression") } } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: 
map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: 
tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Perhaps we should just use a fixed value for this as well (instead of generating a zstd compressed string on the fly)
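The suggestion here is to embed a fixed, pre-built archive in the test rather than compressing data on the fly (which requires a zstd binary on PATH). A condensed sketch of that approach is below; the frame bytes are the ones hardcoded in this record's TestDetectCompressionZstd, while the test name `TestDetectCompressionZstdFixed` and the fixture variable are illustrative, not taken from the PR.

```go
package archive // sketch: would sit alongside the existing tests in archive_test.go

import "testing"

// zstdDockerFrame is a fixed, pre-built Zstandard frame with the payload
// "docker"; the byte values mirror the frame hardcoded in
// TestDetectCompressionZstd, so nothing needs to be compressed at test time.
var zstdDockerFrame = []byte{
	0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528
	0x04, 0x00, 0x31, 0x00, 0x00, // frame header
	0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker"
	0x16, 0x0e, 0x21, 0xc3, // content checksum
}

// TestDetectCompressionZstdFixed checks detection against the fixed frame.
// The same idea could be applied to the decompression test to drop the
// dependency on an external zstd binary.
func TestDetectCompressionZstdFixed(t *testing.T) {
	if c := DetectCompression(zstdDockerFrame); c != Zstd {
		t.Fatalf("expected Zstd to be detected, got %q", c.Extension())
	}
}
```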
thaJeztah
4,440
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Zstandard defines two frame formats: Zstandard frames and skippable frames, so detection should also handle zstd archives that begin with a skippable frame. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows; the magic number of a Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Their design is straightforward, with the sole objective of letting the decoder quickly skip over the user data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: the size `n` of the following User_Data, stored as a 4-byte little-endian unsigned 32-bit integer. **- How I did it** To accommodate the more involved detection logic needed for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompressionZstd`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
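Given the two frame layouts above, the detection change can be illustrated with a short, self-contained Go sketch: data is treated as zstd if it starts with a Zstandard frame, or with a skippable frame (magic 0x184D2A50..0x184D2A5F) whose user data is immediately followed by a Zstandard frame. This illustrates the idea rather than reproducing the exact code merged into `DetectCompression`; the helper name `isZstd` and the mask-based magic comparison are assumptions, and the sample bytes in `main` mirror the test data added in this PR.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var (
	zstdMagic               = []byte{0x28, 0xb5, 0x2f, 0xfd} // Zstandard frame magic, 0xFD2FB528 little-endian
	zstdMagicSkippableStart = []byte{0x50, 0x2a, 0x4d, 0x18} // lowest skippable-frame magic, 0x184D2A50
	zstdMagicSkippableMask  = []byte{0xf0, 0xff, 0xff, 0xff} // low nibble of first byte is "don't care" (0x184D2A5?)
)

// isZstd reports whether source starts with a Zstandard frame, or with a
// skippable frame that is immediately followed by a Zstandard frame.
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true
	}
	// Check the skippable-frame magic: any value 0x184D2A50..0x184D2A5F.
	if len(source) < 8 {
		return false
	}
	for i, b := range zstdMagicSkippableStart {
		if source[i]&zstdMagicSkippableMask[i] != b {
			return false
		}
	}
	// Frame_Size is the 4-byte little-endian length of the user data;
	// skip past the user data and look for a Zstandard frame behind it.
	userDataLen := binary.LittleEndian.Uint32(source[4:8])
	next := 8 + int(userDataLen)
	if next+len(zstdMagic) > len(source) {
		return false
	}
	return bytes.HasPrefix(source[next:], zstdMagic)
}

func main() {
	skippableThenZstd := []byte{
		0x50, 0x2a, 0x4d, 0x18, // skippable frame magic
		0x04, 0x00, 0x00, 0x00, // Frame_Size = 4
		0x5d, 0x00, 0x00, 0x00, // user data
		0x28, 0xb5, 0x2f, 0xfd, // Zstandard frame magic
	}
	fmt.Println(isZstd(skippableThenZstd)) // true
}
```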
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive_test.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { 
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if 
err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestDetectCompressionZstd(t *testing.T) { // test zstd compression without skippable frames. compressedData := []byte{ 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression := DetectCompression(compressedData) if compression != Zstd { t.Fatal("Unexpected compression") } // test zstd compression with skippable frames. 
hex := []byte{ 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F 0x04, 0x00, 0x00, 0x00, // frame size 0x5d, 0x00, 0x00, 0x00, // user data 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression = DetectCompression(hex) if compression != Zstd { t.Fatal("Unexpected compression") } } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: 
map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: 
tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Wondering what we're testing here; it looks like we're testing that our test data itself is valid. If so, that validation should probably happen _before_ the `DetectCompression(hex)` check above. I'm a bit on the fence though; effectively we'd want a fixture here (this check would only fail if there was some change / bug in `github.com/klauspost/compress/zstd`), so I think it'd be fine to just use a fixed value that we know is correct (also see my other comment above), and not create / validate these on the fly (a sketch of such a fixture-based check follows this record).
thaJeztah
4,441
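Picking up the reviewer's suggestion above, a fixture-based variant of the check would hard-code a byte sequence that is already known to be valid zstd, so nothing is generated or verified against `github.com/klauspost/compress/zstd` at test time. This is a minimal sketch only: it reuses the skippable-frame bytes that already appear in this PR's test, it assumes it lives in the same `archive` package as `DetectCompression`, and the test name is made up for illustration.

```go
// TestDetectCompressionZstdSkippableFixture uses a fixed, known-good zstd
// stream (a skippable frame followed by a Zstandard frame containing the
// word "docker") instead of creating or validating the data on the fly.
func TestDetectCompressionZstdSkippableFixture(t *testing.T) {
	fixture := []byte{
		0x50, 0x2a, 0x4d, 0x18, // skippable frame magic: 0x184D2A50..0x184D2A5F
		0x04, 0x00, 0x00, 0x00, // frame size: 4 bytes of user data follow
		0x5d, 0x00, 0x00, 0x00, // user data
		0x28, 0xb5, 0x2f, 0xfd, // Zstandard frame magic: 0xFD2FB528
		0x04, 0x00, 0x31, 0x00, 0x00, // frame header
		0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker"
		0x16, 0x0e, 0x21, 0xc3, // content checksum
	}
	if compression := DetectCompression(fixture); compression != Zstd {
		t.Fatalf("expected Zstd, got %s", compression.Extension())
	}
}
```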
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]>

<!-- Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md

** Make sure all your commits include a signature generated with `git commit -s` **

For additional information on our contributing process, read our contributing guide
https://docs.docker.com/opensource/code/

If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx"

Please provide the following information: -->

**- What I did**

Zstandard defines two frame formats: Zstandard frames and skippable frames, so we should probably support zstd streams that contain skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.

The structure of a single Zstandard frame is as follows; the magic number of a Zstandard frame is 0xFD2FB528.

```
+--------------------+------------+
| Magic_Number       | 4 bytes    |
+--------------------+------------+
| Frame_Header       | 2-14 bytes |
+--------------------+------------+
| Data_Block         | n bytes    |
+--------------------+------------+
| [More Data Blocks] |            |
+--------------------+------------+
| [Content Checksum] | 0-4 bytes  |
+--------------------+------------+
```

Skippable frames allow user-defined data to be inserted into a flow of concatenated frames. Their design is straightforward, with the sole objective of letting the decoder quickly skip over the user-defined data and continue decoding.

```
+--------------+------------+-----------+
| Magic_Number | Frame_Size | User_Data |
+--------------+------------+-----------+
| 4 bytes      | 4 bytes    | n bytes   |
+--------------+------------+-----------+
```

Magic_Number: 0x184D2A5?, meaning any value from 0x184D2A50 to 0x184D2A5F.
Frame_Size: the size `n` of the following User_Data; 4 bytes, little-endian, unsigned 32-bit.

**- How I did it**

To accommodate more complicated detectors for zstd, I changed the `DetectCompression` function.

**- How to verify it**

To verify this PR, I created a new test named `TestDetectCompression`.

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog: -->

**- A picture of a cute animal (not mandatory but encouraged)**

<img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
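The description above walks through the two frame layouts but not the detection logic itself. The following is a minimal, self-contained sketch of how a detector could skip over any leading skippable frames and then look for the Zstandard frame magic. It is written purely against the layout quoted above, not copied from this PR's actual `DetectCompression` change, and the function and constant names are made up for illustration.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	zstdMagic               = 0xFD2FB528 // Zstandard frame magic number
	zstdMagicSkippableStart = 0x184D2A50 // skippable frames use 0x184D2A50..0x184D2A5F
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

// isZstdStream reports whether data starts with a Zstandard frame,
// optionally preceded by one or more skippable frames.
func isZstdStream(data []byte) bool {
	for len(data) >= 4 {
		magic := binary.LittleEndian.Uint32(data[:4])
		if magic == zstdMagic {
			return true
		}
		if magic&zstdMagicSkippableMask != zstdMagicSkippableStart {
			return false // neither a Zstandard frame nor a skippable frame
		}
		// Skippable frame: 4-byte magic, 4-byte little-endian Frame_Size, then User_Data.
		if len(data) < 8 {
			return false
		}
		frameSize := binary.LittleEndian.Uint32(data[4:8])
		next := 8 + int(frameSize)
		if next < 8 || next > len(data) {
			return false // truncated (or overflowing) skippable frame
		}
		data = data[next:]
	}
	return false
}

func main() {
	// A skippable frame carrying 4 bytes of user data, followed by a Zstandard frame.
	sample := []byte{
		0x50, 0x2a, 0x4d, 0x18, // skippable frame magic, little-endian
		0x04, 0x00, 0x00, 0x00, // Frame_Size = 4
		0x5d, 0x00, 0x00, 0x00, // User_Data
		0x28, 0xb5, 0x2f, 0xfd, // Zstandard frame magic, little-endian
	}
	fmt.Println(isZstdStream(sample)) // true
}
```

In practice a compression detector only sees a short prefix of the stream, so a production version would also have to decide what to do when a skippable frame extends past the sampled bytes; the sketch simply reports false in that case.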
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive_test.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { 
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if 
err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestDetectCompressionZstd(t *testing.T) { // test zstd compression without skippable frames. compressedData := []byte{ 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression := DetectCompression(compressedData) if compression != Zstd { t.Fatal("Unexpected compression") } // test zstd compression with skippable frames. 
hex := []byte{ 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F 0x04, 0x00, 0x00, 0x00, // frame size 0x5d, 0x00, 0x00, 0x00, // user data 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression = DetectCompression(hex) if compression != Zstd { t.Fatal("Unexpected compression") } } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: 
map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: 
tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
These helper functions should not be exported, but see my other comment on using a fixture / fixed value, so that we are not testing whether `github.com/klauspost/compress/zstd` works as expected (which is out of scope for this test)
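A minimal sketch of the fixture-based approach this comment suggests, which is essentially what the merged `TestDetectCompressionZstd` in this record already does: the zstd frame is a fixed byte literal, so the assertion exercises only `DetectCompression` and never depends on the zstd library producing a particular output at test time. The test name below is invented for illustration; the fixture bytes are copied from the test in this record.

```go
package archive // hypothetical placement alongside the existing archive tests

import "testing"

// TestDetectCompressionZstdFixture uses a hand-written Zstandard frame as a
// fixture instead of compressing data at test time, so the test does not
// depend on github.com/klauspost/compress/zstd at all.
func TestDetectCompressionZstdFixture(t *testing.T) {
	fixture := []byte{
		0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528
		0x04, 0x00, 0x31, 0x00, 0x00, // frame header
		0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker"
		0x16, 0x0e, 0x21, 0xc3, // content checksum
	}
	if got := DetectCompression(fixture); got != Zstd {
		t.Fatalf("expected Zstd, got %v", got)
	}
}
```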
thaJeztah
4,442
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** As a matter of fact, there are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. So we should probably support zstd algorithms with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows, the magic number of Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Its design is pretty straightforward, with the sole objective to allow the decoder to quickly skip over user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: This is the size `n` of the following UserData, 4 bytes, little-endian format, unsigned 32-bits. **- How I did it** In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompression`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
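To make the frame layouts described above concrete, here is a standalone, illustrative sketch of the detection idea: if the first four bytes fall in the skippable-frame magic range, read the 4-byte little-endian Frame_Size, skip the User_Data, and check again for the Zstandard magic. This is not the actual patch to `DetectCompression`; the function name `isZstd` and the constants are invented for the example.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	zstdMagic          = 0xFD2FB528 // magic number of a Zstandard frame
	skippableMagicBase = 0x184D2A50 // skippable frames use 0x184D2A50..0x184D2A5F
	skippableMagicMask = 0xFFFFFFF0
)

// isZstd reports whether data starts with a Zstandard frame, possibly preceded
// by one or more skippable frames. Illustrative only.
func isZstd(data []byte) bool {
	for len(data) >= 8 {
		magic := binary.LittleEndian.Uint32(data[:4])
		if magic == zstdMagic {
			return true
		}
		if magic&skippableMagicMask != skippableMagicBase {
			return false
		}
		// Skippable frame: 4-byte magic + 4-byte little-endian Frame_Size + User_Data.
		size := uint64(binary.LittleEndian.Uint32(data[4:8]))
		if uint64(len(data)) < 8+size {
			return false
		}
		data = data[8+size:]
	}
	return len(data) >= 4 && binary.LittleEndian.Uint32(data[:4]) == zstdMagic
}

func main() {
	input := []byte{
		0x50, 0x2a, 0x4d, 0x18, // skippable frame magic number (0x184D2A50)
		0x04, 0x00, 0x00, 0x00, // Frame_Size: 4 bytes of user data follow
		0x5d, 0x00, 0x00, 0x00, // User_Data
		0x28, 0xb5, 0x2f, 0xfd, // Zstandard frame magic number (0xFD2FB528)
	}
	fmt.Println(isZstd(input)) // true
}
```

Looping rather than skipping a single frame handles input that begins with several concatenated skippable frames, at the cost of a bounds check on each iteration.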
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive_test.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { 
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if 
err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it is installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it is not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
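The TestPigz and TestDisablePigz cases above check which reader DecompressStream ends up wrapping, depending on whether an unpigz binary is on PATH and whether MOBY_DISABLE_PIGZ is set. As a rough, hypothetical sketch of that kind of capability probe (illustrative only; pigzAvailable is not a function in pkg/archive, and the real selection logic lives inside DecompressStream):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strconv"
)

// pigzAvailable reports whether gzip decompression could plausibly be
// delegated to an external unpigz process: the binary must be on PATH and
// MOBY_DISABLE_PIGZ must not be set to a true value. Assumed semantics for
// illustration, not the actual implementation.
func pigzAvailable() bool {
	if v := os.Getenv("MOBY_DISABLE_PIGZ"); v != "" {
		if disabled, err := strconv.ParseBool(v); err == nil && disabled {
			return false
		}
	}
	_, err := exec.LookPath("unpigz")
	return err == nil
}

func main() {
	fmt.Println("use unpigz:", pigzAvailable())
}

Under these assumptions, the two tests correspond to the two branches: with unpigz present and the variable unset, the stream is fed through the external process (an io.PipeReader in the test's type assertions); otherwise it falls back to the in-process gzip.Reader.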
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestDetectCompressionZstd(t *testing.T) { // test zstd compression without skippable frames. compressedData := []byte{ 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression := DetectCompression(compressedData) if compression != Zstd { t.Fatal("Unexpected compression") } // test zstd compression with skippable frames. 
hex := []byte{ 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F 0x04, 0x00, 0x00, 0x00, // frame size 0x5d, 0x00, 0x00, 0x00, // user data 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression = DetectCompression(hex) if compression != Zstd { t.Fatal("Unexpected compression") } } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: 
map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: 
tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Perhaps include `zstd` in the name here (e.g. `TestDetectCompressionZstd`), as that's currently the only compression we're detecting here.
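A possible shape of the rename the reviewer is suggesting is sketched below. This is not the test added by the PR; the body is an assumption for illustration only, using the `DetectCompression` function and the `Zstd` constant that already exist in this package, and it assumes `DetectCompression` returns `Zstd` for data starting with the Zstandard magic number.

```go
package archive // hypothetical placement alongside the existing tests

import "testing"

// TestDetectCompressionZstd is a sketch of the reviewer's suggested name;
// it only checks that zstd data is recognised by DetectCompression.
func TestDetectCompressionZstd(t *testing.T) {
	// 0xFD2FB528 stored little-endian on disk, followed by filler bytes.
	zstdData := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x00}
	if c := DetectCompression(zstdData); c != Zstd {
		t.Fatalf("expected Zstd, got %v", c)
	}
}
```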
thaJeztah
4,443
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]>

**- What I did**

Zstandard defines two frame formats: Zstandard frames and Skippable frames, so zstd streams that begin with a skippable frame should also be supported. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.

The structure of a single Zstandard frame is as follows; its magic number is 0xFD2FB528.

```
+--------------------+------------+
| Magic_Number       | 4 bytes    |
+--------------------+------------+
| Frame_Header       | 2-14 bytes |
+--------------------+------------+
| Data_Block         | n bytes    |
+--------------------+------------+
| [More Data Blocks] |            |
+--------------------+------------+
| [Content Checksum] | 0-4 bytes  |
+--------------------+------------+
```

Skippable frames allow user-defined data to be inserted into a flow of concatenated frames. Their design is straightforward, with the sole objective of letting the decoder quickly skip over the user-defined data and continue decoding.

```
+--------------+------------+-----------+
| Magic_Number | Frame_Size | User_Data |
+--------------+------------+-----------+
| 4 bytes      | 4 bytes    | n bytes   |
+--------------+------------+-----------+
```

Magic_Number: 0x184D2A5?, meaning any value from 0x184D2A50 to 0x184D2A5F.
Frame_Size: the size `n` of the following User_Data; 4 bytes, little-endian, unsigned 32-bit.

**- How I did it**

To accommodate a more involved detector for Zstd, I changed the `DetectCompression` function.

**- How to verify it**

To verify this PR, I created a new test named `TestDetectCompression`.

**- Description for the changelog**

**- A picture of a cute animal (not mandatory but encouraged)**

<img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
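The detection logic this description implies can be illustrated with a short, self-contained Go sketch. It is not the code from the PR; the helper name `isZstd` and the skipping policy (require a real Zstandard frame after the skippable one) are assumptions made for illustration, based only on the magic numbers and the Frame_Size layout quoted above.

```go
// Minimal sketch of zstd detection that accepts both frame kinds described
// in the PR description. Not the actual patch; illustration only.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var (
	zstdMagic          = []byte{0x28, 0xb5, 0x2f, 0xfd} // 0xFD2FB528, little-endian on disk
	zstdSkippableMagic = []byte{0x50, 0x2a, 0x4d, 0x18} // 0x184D2A50; low nibble of the first byte is a wildcard
)

// isZstd reports whether source starts with a Zstandard frame, or with a
// skippable frame that is followed by a Zstandard frame.
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		// Plain Zstandard frame.
		return true
	}
	// Check for a skippable frame: magic 0x184D2A50..0x184D2A5F.
	if len(source) < 8 || source[0]&0xf0 != zstdSkippableMagic[0] ||
		!bytes.Equal(source[1:4], zstdSkippableMagic[1:]) {
		return false
	}
	// Frame_Size is a little-endian uint32 giving the length of User_Data.
	frameSize := binary.LittleEndian.Uint32(source[4:8])
	next := 8 + int(frameSize)
	if len(source) < next+len(zstdMagic) {
		return false
	}
	// Whatever follows the skippable frame must itself start with the
	// Zstandard magic for the stream to be treated as zstd here.
	return bytes.HasPrefix(source[next:], zstdMagic)
}

func main() {
	zstdFrame := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x00}
	// Skippable frame with 2 bytes of user data, then a Zstandard frame.
	skippable := append([]byte{0x50, 0x2a, 0x4d, 0x18, 0x02, 0x00, 0x00, 0x00, 0xaa, 0xbb}, zstdFrame...)
	fmt.Println(isZstd(zstdFrame))          // true
	fmt.Println(isZstd(skippable))          // true
	fmt.Println(isZstd([]byte("not zstd"))) // false
}
```

The sketch mirrors the two-step reasoning in the description: match the Zstandard magic directly, and otherwise skip over one skippable frame (using its Frame_Size field) before looking for the Zstandard magic again.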
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive_test.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { 
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if 
err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestDetectCompressionZstd(t *testing.T) { // test zstd compression without skippable frames. compressedData := []byte{ 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression := DetectCompression(compressedData) if compression != Zstd { t.Fatal("Unexpected compression") } // test zstd compression with skippable frames. 
hex := []byte{ 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F 0x04, 0x00, 0x00, 0x00, // frame size 0x5d, 0x00, 0x00, 0x00, // user data 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression = DetectCompression(hex) if compression != Zstd { t.Fatal("Unexpected compression") } } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: 
map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: 
tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it is installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it is not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Done.
dkkb
4,444
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** As a matter of fact, there are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. So we should probably support zstd algorithms with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. The structure of a single Zstandard frame is as follows, the magic number of Zstandard frame is 0xFD2FB528. ``` +--------------------+------------+ | Magic_Number | 4 bytes | +--------------------+------------+ | Frame_Header | 2-14 bytes | +--------------------+------------+ | Data_Block | n bytes | +--------------------+------------+ | [More Data Blocks] | | +--------------------+------------+ | [Content Checksum] | 0-4 bytes | +--------------------+------------+ ``` Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Its design is pretty straightforward, with the sole objective to allow the decoder to quickly skip over user-defined data and continue decoding. ``` +--------------+------------+-----------+ | Magic_Number | Frame_Size | User_Data | +--------------+------------+-----------+ | 4 bytes | 4 bytes | n bytes | +--------------+------------+-----------+ ``` Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. Frame_Size: This is the size `n` of the following UserData, 4 bytes, little-endian format, unsigned 32-bits. **- How I did it** In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function. **- How to verify it** To verify this PR, I created a new test named `TestDetectCompression`. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)** <img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
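The frame layout described above suggests a simple detection loop: read the 4-byte little-endian magic, skip over any skippable frames (magic 0x184D2A50 through 0x184D2A5F, followed by a 4-byte frame size and that many bytes of user data), and accept the stream once a Zstandard frame magic (0xFD2FB528) is found. Below is a minimal, hypothetical Go sketch of that idea; it is not the actual moby/moby implementation, and the helper name `isZstd` and the sample bytes are made up for illustration.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	zstdMagic          = 0xFD2FB528 // magic number of a Zstandard frame
	zstdSkippableStart = 0x184D2A50 // skippable frame magics: 0x184D2A50..0x184D2A5F
	zstdSkippableMask  = 0xFFFFFFF0
)

// isZstd reports whether b looks like a Zstandard stream, skipping over
// any leading skippable frames before checking for the Zstandard magic.
func isZstd(b []byte) bool {
	for len(b) >= 4 {
		magic := binary.LittleEndian.Uint32(b[:4])
		if magic == zstdMagic {
			return true
		}
		if magic&zstdSkippableMask != zstdSkippableStart {
			return false // neither a Zstandard frame nor a skippable frame
		}
		// Skippable frame: 4-byte magic, 4-byte little-endian size, then user data.
		if len(b) < 8 {
			return false
		}
		size := binary.LittleEndian.Uint32(b[4:8])
		if uint64(len(b)) < 8+uint64(size) {
			return false
		}
		b = b[8+size:]
	}
	return false
}

func main() {
	// A skippable frame (4 bytes of user data) followed by the start of a Zstandard frame.
	data := []byte{
		0x50, 0x2a, 0x4d, 0x18, // skippable frame magic (little-endian 0x184D2A50)
		0x04, 0x00, 0x00, 0x00, // frame size = 4
		0xde, 0xad, 0xbe, 0xef, // user data
		0x28, 0xb5, 0x2f, 0xfd, // Zstandard frame magic (little-endian 0xFD2FB528)
	}
	fmt.Println(isZstd(data)) // true
}
```

A detector like this only needs the first few bytes of the stream when no skippable frames are present, which matches how the `TestDetectCompressionZstd` case in the test file exercises both the plain and the skippable-frame-prefixed inputs.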
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive_test.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { 
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if 
err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestDetectCompressionZstd(t *testing.T) { // test zstd compression without skippable frames. compressedData := []byte{ 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression := DetectCompression(compressedData) if compression != Zstd { t.Fatal("Unexpected compression") } // test zstd compression with skippable frames. 
hex := []byte{ 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F 0x04, 0x00, 0x00, 0x00, // frame size 0x5d, 0x00, 0x00, 0x00, // user data 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression = DetectCompression(hex) if compression != Zstd { t.Fatal("Unexpected compression") } } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: 
map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: 
tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Sounds good.
dkkb
4,445
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]>

<!-- Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md

** Make sure all your commits include a signature generated with `git commit -s` **

For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/

If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"

Please provide the following information: -->

**- What I did**

There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames, so we should also support zstd data that uses skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.

The structure of a single Zstandard frame is as follows; the magic number of a Zstandard frame is 0xFD2FB528.

```
+--------------------+------------+
| Magic_Number       | 4 bytes    |
+--------------------+------------+
| Frame_Header       | 2-14 bytes |
+--------------------+------------+
| Data_Block         | n bytes    |
+--------------------+------------+
| [More Data Blocks] |            |
+--------------------+------------+
| [Content Checksum] | 0-4 bytes  |
+--------------------+------------+
```

Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Their design is straightforward, with the sole objective of allowing the decoder to quickly skip over user-defined data and continue decoding.

```
+--------------+------------+-----------+
| Magic_Number | Frame_Size | User_Data |
+--------------+------------+-----------+
| 4 bytes      | 4 bytes    | n bytes   |
+--------------+------------+-----------+
```

Magic_Number: 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F.

Frame_Size: the size `n` of the following User_Data, 4 bytes, little-endian, unsigned 32-bit.

**- How I did it**

In order to accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function.

**- How to verify it**

To verify this PR, I created a new test named `TestDetectCompression`.

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

**- A picture of a cute animal (not mandatory but encouraged)**

<img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
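The frame-detection rule described above lends itself to a short illustration. The following is a minimal, self-contained sketch, not the actual `DetectCompression` code in `pkg/archive`; names such as `isZstd` and `zstdMagicSkippableStart` are made up for this example. It shows how a detector can walk over any leading skippable frames before looking for the Zstandard frame magic:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// Magic numbers from the Zstandard framing format referenced above.
var (
	zstdMagic               = []byte{0x28, 0xb5, 0x2f, 0xfd} // Zstandard frame, 0xFD2FB528 little-endian
	zstdMagicSkippableStart = uint32(0x184D2A50)             // skippable frames use 0x184D2A50..0x184D2A5F
	zstdMagicSkippableMask  = uint32(0xFFFFFFF0)             // low nibble of the magic is a wildcard
)

// isZstd reports whether source starts with a Zstandard frame, optionally
// preceded by one or more skippable frames.
func isZstd(source []byte) bool {
	for {
		if bytes.HasPrefix(source, zstdMagic) {
			return true // found the Zstandard frame
		}
		if len(source) < 8 {
			return false // not enough bytes left for a skippable-frame header
		}
		magic := binary.LittleEndian.Uint32(source[:4])
		if magic&zstdMagicSkippableMask != zstdMagicSkippableStart {
			return false // neither a Zstandard frame nor a skippable frame
		}
		// Skippable frame: 4-byte magic, 4-byte little-endian Frame_Size, then User_Data.
		frameSize := binary.LittleEndian.Uint32(source[4:8])
		if uint64(len(source)) < 8+uint64(frameSize) {
			return false // truncated input; the user data extends past what we can see
		}
		source = source[8+int(frameSize):] // skip the frame and keep looking
	}
}

func main() {
	// A 12-byte skippable frame followed by the Zstandard frame magic,
	// mirroring the byte layout used in TestDetectCompressionZstd.
	payload := []byte{
		0x50, 0x2a, 0x4d, 0x18, // skippable frame magic (0x184D2A50, little-endian)
		0x04, 0x00, 0x00, 0x00, // Frame_Size = 4
		0x5d, 0x00, 0x00, 0x00, // User_Data
		0x28, 0xb5, 0x2f, 0xfd, // Zstandard frame magic
	}
	fmt.Println(isZstd(payload)) // true
}
```

Note that a detector usually only sees a short prefix of the stream, so a skippable frame whose user data extends past the sniffed bytes cannot be skipped; the sketch simply reports false in that case.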
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive_test.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { 
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if 
err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err)
		}
	}
}

// TestXGlobalNoParent is a regression test to check parent directories are not created for PAX headers
func TestXGlobalNoParent(t *testing.T) {
	buf := &bytes.Buffer{}
	w := tar.NewWriter(buf)
	err := w.WriteHeader(&tar.Header{
		Name:     "foo/bar",
		Typeflag: tar.TypeXGlobalHeader,
	})
	assert.NilError(t, err)

	tmpDir, err := os.MkdirTemp("", "pax-test")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpDir)

	err = Untar(buf, tmpDir, nil)
	assert.NilError(t, err)

	_, err = os.Lstat(filepath.Join(tmpDir, "foo"))
	assert.Check(t, err != nil)
	assert.Check(t, errors.Is(err, os.ErrNotExist))
}

func TestReplaceFileTarWrapper(t *testing.T) {
	filesInArchive := 20
	testcases := []struct {
		doc       string
		filename  string
		modifier  TarModifierFunc
		expected  string
		fileCount int
	}{
		{
			doc:       "Modifier creates a new file",
			filename:  "newfile",
			modifier:  createModifier(t),
			expected:  "the new content",
			fileCount: filesInArchive + 1,
		},
		{
			doc:       "Modifier replaces a file",
			filename:  "file-2",
			modifier:  createOrReplaceModifier,
			expected:  "the new content",
			fileCount: filesInArchive,
		},
		{
			doc:       "Modifier replaces the last file",
			filename:  fmt.Sprintf("file-%d", filesInArchive-1),
			modifier:  createOrReplaceModifier,
			expected:  "the new content",
			fileCount: filesInArchive,
		},
		{
			doc:       "Modifier appends to a file",
			filename:  "file-3",
			modifier:  appendModifier,
			expected:  "fooo\nnext line",
			fileCount: filesInArchive,
		},
	}

	for _, testcase := range testcases {
		sourceArchive, cleanup := buildSourceArchive(t, filesInArchive)
		defer cleanup()

		resultArchive := ReplaceFileTarWrapper(
			sourceArchive,
			map[string]TarModifierFunc{testcase.filename: testcase.modifier})

		actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc)
		assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc)
	}
}

// TestPrefixHeaderReadable tests that files that could be created with the
// version of this package that was built with <=go17 are still readable.
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller
	contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper)

	_, err := exec.LookPath("unpigz")
	if err == nil {
		t.Log("Tested whether Pigz is used, as it is installed")
		// For the command wait wrapper
		cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper)
		assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{}))
	} else {
		t.Log("Tested whether Pigz is not used, as it is not installed")
		assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{}))
	}
}
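The after-change version of pkg/archive/archive_test.go that follows adds TestDetectCompressionZstd, which feeds a Zstandard stream to the detector both with and without a leading skippable frame. As a rough illustration of the idea that test exercises, here is a minimal, self-contained sketch of magic-byte format detection that steps over zstd skippable frames. The names and constants (detectCompression, isZstd, the format enum) are assumptions made up for this sketch and are not the pkg/archive API.

// Sketch only: magic-byte compression detection with zstd skippable-frame
// handling. Names and constants are illustrative, not the pkg/archive API.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

type format int

const (
	formatUnknown format = iota
	formatBzip2
	formatGzip
	formatXz
	formatZstd
)

var (
	bzip2Magic = []byte{0x42, 0x5A, 0x68}                   // "BZh"
	gzipMagic  = []byte{0x1F, 0x8B}                         // gzip member header
	xzMagic    = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} // 0xFD "7zXZ" 0x00
	zstdMagic  = []byte{0x28, 0xB5, 0x2F, 0xFD}             // 0xFD2FB528, little-endian
)

const (
	zstdSkippableStart = 0x184D2A50 // skippable frames use 0x184D2A50..0x184D2A5F
	zstdSkippableMask  = 0xFFFFFFF0
)

// isZstd reports whether source starts with a Zstandard frame, stepping over
// any leading skippable frames (magic, then a 4-byte little-endian size, then
// that many bytes of user data).
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true
	}
	if len(source) < 8 {
		return false
	}
	magic := binary.LittleEndian.Uint32(source[:4])
	if magic&zstdSkippableMask != zstdSkippableStart {
		return false
	}
	skip := 8 + uint64(binary.LittleEndian.Uint32(source[4:8]))
	if uint64(len(source)) < skip {
		return false // truncated skippable frame
	}
	return isZstd(source[skip:])
}

func detectCompression(source []byte) format {
	switch {
	case bytes.HasPrefix(source, bzip2Magic):
		return formatBzip2
	case bytes.HasPrefix(source, gzipMagic):
		return formatGzip
	case bytes.HasPrefix(source, xzMagic):
		return formatXz
	case isZstd(source):
		return formatZstd
	default:
		return formatUnknown
	}
}

func main() {
	// Same shape as the test data: a skippable frame followed by a real frame.
	sample := []byte{
		0x50, 0x2A, 0x4D, 0x18, // skippable-frame magic
		0x04, 0x00, 0x00, 0x00, // frame size = 4
		0x5D, 0x00, 0x00, 0x00, // user data
		0x28, 0xB5, 0x2F, 0xFD, // Zstandard frame magic
	}
	fmt.Println(detectCompression(sample) == formatZstd) // true
}

The design point mirrored by the test data is that a zstd stream may be preceded by one or more skippable frames, each self-describing via its size field, so detection cannot rely on the first four bytes alone.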
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestDetectCompressionZstd(t *testing.T) { // test zstd compression without skippable frames. compressedData := []byte{ 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression := DetectCompression(compressedData) if compression != Zstd { t.Fatal("Unexpected compression") } // test zstd compression with skippable frames. 
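	// A skippable frame is a 4-byte magic in the range 0x184D2A50..0x184D2A5F,
	// followed by a 4-byte little-endian frame size and that many bytes of user
	// data; detection must step over it to reach the 0xFD2FB528 frame magic.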
hex := []byte{ 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F 0x04, 0x00, 0x00, 0x00, // frame size 0x5d, 0x00, 0x00, 0x00, // user data 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression = DetectCompression(hex) if compression != Zstd { t.Fatal("Unexpected compression") } } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: 
map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: 
tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Ok, I'll un-export it.
dkkb
4,446
moby/moby
42,862
compression: support zstd with skippable frame
Signed-off-by: Da McGrady <[email protected]>

<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: -->

**- What I did**

Zstandard defines two frame formats: Zstandard frames and skippable frames, so zstd detection should also accept streams that begin with skippable frames. See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.

The structure of a single Zstandard frame is as follows; the magic number of a Zstandard frame is 0xFD2FB528.

```
+--------------------+------------+
| Magic_Number       | 4 bytes    |
+--------------------+------------+
| Frame_Header       | 2-14 bytes |
+--------------------+------------+
| Data_Block         | n bytes    |
+--------------------+------------+
| [More Data Blocks] |            |
+--------------------+------------+
| [Content Checksum] | 0-4 bytes  |
+--------------------+------------+
```

Skippable frames allow the insertion of user-defined data into a flow of concatenated frames. Their design is straightforward, with the sole objective of letting the decoder quickly skip over the user-defined data and continue decoding.

```
+--------------+------------+-----------+
| Magic_Number | Frame_Size | User_Data |
+--------------+------------+-----------+
| 4 bytes      | 4 bytes    | n bytes   |
+--------------+------------+-----------+
```

Magic_Number: 0x184D2A5?, meaning any value from 0x184D2A50 to 0x184D2A5F.
Frame_Size: the size `n` of the following User_Data; 4 bytes, little-endian, unsigned 32-bit.

**- How I did it**

To accommodate more complicated detectors for Zstd, I changed the `DetectCompression` function.

**- How to verify it**

To verify this PR, I created a new test named `TestDetectCompression`.

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

**- A picture of a cute animal (not mandatory but encouraged)**

<img src="https://user-images.githubusercontent.com/82504881/133783740-8bbaf6c0-25d6-4144-8812-047ea47f65b2.jpeg" width=400>
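To make the frame layout above concrete, here is a minimal, self-contained Go sketch of how a detector can accept a stream that starts with one or more skippable frames before the first Zstandard frame. The function name `isZstd`, the constant names, and the standalone `main` are illustrative assumptions, not code taken from this PR; per the description, the actual change lives inside the `DetectCompression` function in pkg/archive.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	zstdMagicSkippableStart = 0x184D2A50 // first of the 16 skippable-frame magic values
	zstdMagicSkippableMask  = 0xFFFFFFF0 // low nibble of the skippable magic is a wildcard
)

// Zstandard frame magic 0xFD2FB528, as it appears on the wire (little-endian).
var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

// isZstd reports whether source looks like a Zstandard stream, accepting any
// number of leading skippable frames before the first Zstandard frame.
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true // plain Zstandard frame
	}
	// Walk over leading skippable frames: 4-byte magic, 4-byte little-endian
	// user-data size, then the user data itself.
	for len(source) >= 8 {
		magic := binary.LittleEndian.Uint32(source[:4])
		if magic&zstdMagicSkippableMask != zstdMagicSkippableStart {
			return false
		}
		userDataLen := binary.LittleEndian.Uint32(source[4:8])
		next := 8 + int(userDataLen)
		if next > len(source) {
			return false // truncated buffer; cannot decide
		}
		source = source[next:]
		if bytes.HasPrefix(source, zstdMagic) {
			return true
		}
	}
	return false
}

func main() {
	stream := []byte{
		0x50, 0x2a, 0x4d, 0x18, // skippable frame magic 0x184D2A50 (little-endian)
		0x04, 0x00, 0x00, 0x00, // 4 bytes of user data follow
		0x64, 0x6f, 0x63, 0x6b, // user data
		0x28, 0xb5, 0x2f, 0xfd, // Zstandard frame magic
	}
	fmt.Println(isZstd(stream)) // true
}
```

The loop (rather than a single prefix check) is the key design point: a conforming encoder may prepend several skippable frames of arbitrary size, so the detector must keep skipping until it either finds a Zstandard frame magic or runs out of data.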
null
2021-09-17 12:25:11+00:00
2021-10-21 18:29:12+00:00
pkg/archive/archive_test.go
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { 
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if 
err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "compress/gzip" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestExtensionZstd(t *testing.T) { compression := Zstd output := compression.Extension() if output != "tar.zst" { t.Fatalf("The extension of a zstd archive should be 'tar.zst'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") 
} } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := os.MkdirTemp("", "docker-archive-test") assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) 
if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { 
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } os.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := os.MkdirTemp("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestDetectCompressionZstd(t *testing.T) { // test zstd compression without skippable frames. compressedData := []byte{ 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression := DetectCompression(compressedData) if compression != Zstd { t.Fatal("Unexpected compression") } // test zstd compression with skippable frames. 
hex := []byte{ 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F 0x04, 0x00, 0x00, 0x00, // frame size 0x5d, 0x00, 0x00, 0x00, // user data 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression = DetectCompression(hex) if compression != Zstd { t.Fatal("Unexpected compression") } } func TestTarUntar(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-chown-opt") assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = os.WriteFile(filePath, []byte("hello world"), 0700) assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { ContainerID: 0, HostID: 0, Size: 65536, }, 1: { ContainerID: 0, HostID: 100000, Size: 65536, }, } cases := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &idtools.Identity{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &idtools.Identity{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } func TestTarWithOptions(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: 
map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := os.MkdirTemp("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: 
tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not crated for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", "pax-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, errors.Is(err, os.ErrNotExist)) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. 
func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := os.MkdirTemp("", "prefix-test") assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } func TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } os.Setenv("MOBY_DISABLE_PIGZ", "true") defer os.Unsetenv("MOBY_DISABLE_PIGZ") r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // For the bufio pool outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) // For the context 
canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } }
dkkb
693697bddaf2d80156bab54f447963f48eb92e78
872c64cd661073191239313457be8830e674eae7
Actually, the purpose of this step is to ensure that the compressed data is in the correct zstd format, so I decompress it and check the value. This section was removed.
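For readers wondering what such a decompress-and-verify step might look like, here is a minimal sketch. It assumes the `github.com/klauspost/compress/zstd` decoder and that the handcrafted frame in `TestDetectCompressionZstd` decodes to the `docker` payload its comments describe; `verifyZstdPayload` is a hypothetical helper written for illustration, not the code that was removed from the test.

```go
package archive

import (
	"testing"

	"github.com/klauspost/compress/zstd"
)

// verifyZstdPayload decompresses data and checks that the recovered payload
// matches want, proving the bytes form a valid zstd stream rather than merely
// starting with the right magic number.
func verifyZstdPayload(t *testing.T, data []byte, want string) {
	t.Helper()

	// A nil reader is fine when only DecodeAll is used.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		t.Fatalf("failed to create zstd decoder: %v", err)
	}
	defer dec.Close()

	got, err := dec.DecodeAll(data, nil)
	if err != nil {
		t.Fatalf("failed to decompress zstd data: %v", err)
	}
	if string(got) != want {
		t.Fatalf("unexpected payload: got %q, want %q", got, want)
	}
}
```

Under those assumptions, `verifyZstdPayload(t, compressedData, "docker")` would assert that the test's first frame really decodes to the `docker` data block.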
dkkb
4,447
moby/moby
42,838
Add an option to specify log format for awslogs driver
Added an option 'awslogs-format' to allow specifying a log format for the logs sent to CloudWatch from the aws log driver. For now, only the 'json/emf' format is supported. If no option is provided, the log format header in the request to CloudWatch will be omitted as before.

Signed-off-by: James Sanders <[email protected]>

**- What I did**

- Added an option `awslogs-format` to allow specifying a log format for the logs sent to CloudWatch from the aws log driver.
- For now, only the `json/emf` format is supported.
- If no option is provided, the log format header in the request to CloudWatch will be omitted as before.
- This addresses https://github.com/moby/moby/issues/42731
- Users cannot provide the `awslogs-datetime-format` or the `awslogs-multiline-pattern` options when the `awslogs-format` option is set to `json/emf`. This is because both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options modify the log line delimiter. Given that, for now, the `json/emf` format must be a valid JSON line, it does not make sense to allow for different line delimiters when using `json/emf`.

**- How I did it**

- Modified the `awslogs` log driver to add a new CloudWatch client "handler" (basically middleware) to add the `x-amzn-logs-format` header to the request when the log format option is given and is `json/emf` (a rough sketch of this pattern follows this description).
- Tried to follow convention at every step of the way.
- Based the key name off of the one used in the [fluentd plugin](https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatch)

**- How to verify it**

- Unit Testing
- Manual integration testing:
  - Ran a container with no format option provided and verified logs were still published (no regression)
  - Ran a container in a development environment with `--log-opt awslogs-format=json/emf` enabled and saw metrics posted to CloudWatch:

    ```bash
    docker run --rm \
      --log-driver awslogs \
      --log-opt awslogs-region=us-west-2 \
      --log-opt awslogs-group=emf-logger-test-container \
      --log-opt awslogs-create-group=true \
      --log-opt awslogs-format=json/emf \
      emf-logger \
      run
    ```

  - Metrics in CloudWatch: <img width="1091" alt="image" src="https://user-images.githubusercontent.com/10730172/132904614-a4c05775-a495-449c-9468-308edf9a0aa6.png">
  - Attempted to specify a "random" log format and saw that an error was thrown:

    ```bash
    # docker run --rm \
      --log-driver awslogs \
      --log-opt awslogs-region=us-west-2 \
      --log-opt awslogs-group=emf-logger-test-container \
      --log-opt awslogs-create-group=true \
      --log-opt awslogs-format=random \
      emf-logger \
      run
    docker: Error response from daemon: unsupported log format 'random'.
    ```

  - Attempted to set `awslogs-datetime-format` and saw that an error was thrown:

    ```bash
    docker run --rm \
      --log-driver awslogs \
      --log-opt awslogs-region=us-west-2 \
      --log-opt awslogs-group=emf-logger-test-container \
      --log-opt awslogs-create-group=true \
      --log-opt awslogs-format='json/emf' \
      --log-opt awslogs-datetime-format='YYYY-mm-dd' \
      emf-logger \
      serve
    docker: Error response from daemon: you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'.
    ```

  - Attempted to set `awslogs-multiline-pattern` and saw that an error was thrown:

    ```bash
    docker run --rm \
      --log-driver awslogs \
      --log-opt awslogs-region=us-west-2 \
      --log-opt awslogs-group=emf-logger-test-container \
      --log-opt awslogs-create-group=true \
      --log-opt awslogs-format='json/emf' \
      --log-opt awslogs-multiline-pattern='-------' \
      emf-logger \
      serve
    docker: Error response from daemon: you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'.
    ```

**- Description for the changelog**

Added a new option to the `awslogs` log driver to specify the log format that is sent to CloudWatch.

**- A picture of a cute animal (not mandatory but encouraged)**

![image](https://user-images.githubusercontent.com/10730172/132908691-81dd1525-5133-4ac2-938e-b1e7d84e30c9.png)
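As referenced in the description above, here is a minimal, hedged sketch of what registering such a request handler and the accompanying option validation could look like. It mirrors the `DockerUserAgentHandler` pattern already present in `cloudwatchlogs.go` and the `x-amzn-logs-format` / `json/emf` constants visible in the patched file; the helper names `withLogsFormatHeader` and `validateLogFormat` are illustrative only and are not necessarily the names used in the actual change.

```go
package awslogs

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

const (
	logsFormatHeader = "x-amzn-logs-format" // header CloudWatch uses to select a log format
	jsonEmfLogFormat = "json/emf"           // only format supported for now
	logFormatKey     = "awslogs-format"     // log-opt key exposed to users
)

// withLogsFormatHeader registers a build handler that stamps every outgoing
// CloudWatch Logs request with the requested log format (illustrative only).
func withLogsFormatHeader(client *cloudwatchlogs.CloudWatchLogs, format string) {
	client.Handlers.Build.PushBackNamed(request.NamedHandler{
		Name: "LogFormatHeaderHandler",
		Fn: func(req *request.Request) {
			req.HTTPRequest.Header.Set(logsFormatHeader, format)
		},
	})
}

// validateLogFormat enforces the rules described in the PR: only json/emf is
// accepted, and it cannot be combined with the datetime/multiline options.
func validateLogFormat(cfg map[string]string) error {
	format, ok := cfg[logFormatKey]
	if !ok {
		return nil // header is simply omitted when the option is absent
	}
	if format != jsonEmfLogFormat {
		return fmt.Errorf("unsupported log format '%s'", format)
	}
	if cfg["awslogs-datetime-format"] != "" || cfg["awslogs-multiline-pattern"] != "" {
		return fmt.Errorf("you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to '%s'", jsonEmfLogFormat)
	}
	return nil
}
```

The design choice mirrors how the driver already injects its Docker-specific User-Agent: adding a named build handler keeps the format header out of the hot logging path and applies it uniformly to every PutLogEvents call made by that client.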
null
2021-09-10 20:09:50+00:00
2021-12-02 19:48:07+00:00
daemon/logger/awslogs/cloudwatchlogs.go
// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( "fmt" "os" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( name = "awslogs" regionKey = "awslogs-region" endpointKey = "awslogs-endpoint" regionEnvKey = "AWS_REGION" logGroupKey = "awslogs-group" logStreamKey = "awslogs-stream" logCreateGroupKey = "awslogs-create-group" logCreateStreamKey = "awslogs-create-stream" tagKey = "tag" datetimeFormatKey = "awslogs-datetime-format" multilinePatternKey = "awslogs-multiline-pattern" credentialsEndpointKey = "awslogs-credentials-endpoint" forceFlushIntervalKey = "awslogs-force-flush-interval-seconds" maxBufferedEventsKey = "awslogs-max-buffered-events" defaultForceFlushInterval = 5 * time.Second defaultMaxBufferedEvents = 4096 // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 26 maximumBytesPerPut = 1048576 maximumLogEventsPerPut = 10000 // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html // Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the // Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To compensate for that and to avoid // splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. 
maximumBytesPerEvent = 262144 - perEventBytes resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" resourceNotFoundCode = "ResourceNotFoundException" credentialsEndpoint = "http://169.254.170.2" userAgentHeader = "User-Agent" ) type logStream struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration multilinePattern *regexp.Regexp client api messages chan *logger.Message lock sync.RWMutex closed bool sequenceToken *string } type logStreamConfig struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration maxBufferedEvents int multilinePattern *regexp.Regexp } var _ logger.SizedLogger = &logStream{} type api interface { CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } type regionFinder interface { Region() (string, error) } type wrappedEvent struct { inputLogEvent *cloudwatchlogs.InputLogEvent insertOrder int } type byTimestamp []wrappedEvent // init registers the awslogs driver func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // eventBatch holds the events that are batched for submission and the // associated data about it. // // Warning: this type is not threadsafe and must not be used // concurrently. This type is expected to be consumed in a single go // routine and never concurrently. type eventBatch struct { batch []wrappedEvent bytes int } // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, // awslogs-endpoint, awslogs-group, awslogs-stream, awslogs-create-group, // awslogs-multiline-pattern and awslogs-datetime-format. // When available, configuration is also taken from environment variables // AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, the shared credentials // file (~/.aws/credentials), and the EC2 Instance Metadata Service. 
func New(info logger.Info) (logger.Logger, error) { containerStreamConfig, err := newStreamConfig(info) if err != nil { return nil, err } client, err := newAWSLogsClient(info) if err != nil { return nil, err } containerStream := &logStream{ logStreamName: containerStreamConfig.logStreamName, logGroupName: containerStreamConfig.logGroupName, logCreateGroup: containerStreamConfig.logCreateGroup, logCreateStream: containerStreamConfig.logCreateStream, logNonBlocking: containerStreamConfig.logNonBlocking, forceFlushInterval: containerStreamConfig.forceFlushInterval, multilinePattern: containerStreamConfig.multilinePattern, client: client, messages: make(chan *logger.Message, containerStreamConfig.maxBufferedEvents), } creationDone := make(chan bool) if containerStream.logNonBlocking { go func() { backoff := 1 maxBackoff := 32 for { // If logger is closed we are done containerStream.lock.RLock() if containerStream.closed { containerStream.lock.RUnlock() break } containerStream.lock.RUnlock() err := containerStream.create() if err == nil { break } time.Sleep(time.Duration(backoff) * time.Second) if backoff < maxBackoff { backoff *= 2 } logrus. WithError(err). WithField("container-id", info.ContainerID). WithField("container-name", info.ContainerName). Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") } close(creationDone) }() } else { if err = containerStream.create(); err != nil { return nil, err } close(creationDone) } go containerStream.collectBatch(creationDone) return containerStream, nil } // Parses most of the awslogs- options and prepares a config object to be used for newing the actual stream // It has been formed out to ease Utest of the New above func newStreamConfig(info logger.Info) (*logStreamConfig, error) { logGroupName := info.Config[logGroupKey] logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") if err != nil { return nil, err } logCreateGroup := false if info.Config[logCreateGroupKey] != "" { logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) if err != nil { return nil, err } } logNonBlocking := info.Config["mode"] == "non-blocking" forceFlushInterval := defaultForceFlushInterval if info.Config[forceFlushIntervalKey] != "" { forceFlushIntervalAsInt, err := strconv.Atoi(info.Config[forceFlushIntervalKey]) if err != nil { return nil, err } forceFlushInterval = time.Duration(forceFlushIntervalAsInt) * time.Second } maxBufferedEvents := int(defaultMaxBufferedEvents) if info.Config[maxBufferedEventsKey] != "" { maxBufferedEvents, err = strconv.Atoi(info.Config[maxBufferedEventsKey]) if err != nil { return nil, err } } if info.Config[logStreamKey] != "" { logStreamName = info.Config[logStreamKey] } logCreateStream := true if info.Config[logCreateStreamKey] != "" { logCreateStream, err = strconv.ParseBool(info.Config[logCreateStreamKey]) if err != nil { return nil, err } } multilinePattern, err := parseMultilineOptions(info) if err != nil { return nil, err } containerStreamConfig := &logStreamConfig{ logStreamName: logStreamName, logGroupName: logGroupName, logCreateGroup: logCreateGroup, logCreateStream: logCreateStream, logNonBlocking: logNonBlocking, forceFlushInterval: forceFlushInterval, maxBufferedEvents: maxBufferedEvents, multilinePattern: multilinePattern, } return containerStreamConfig, nil } // Parses awslogs-multiline-pattern and awslogs-datetime-format options // If awslogs-datetime-format is present, convert the format from strftime // to regexp and return. 
// If awslogs-multiline-pattern is present, compile regexp and return func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { dateTimeFormat := info.Config[datetimeFormatKey] multilinePatternKey := info.Config[multilinePatternKey] // strftime input is parsed into a regular expression if dateTimeFormat != "" { // %. matches each strftime format sequence and ReplaceAllStringFunc // looks up each format sequence in the conversion table strftimeToRegex // to replace with a defined regular expression r := regexp.MustCompile("%.") multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { return strftimeToRegex[s] }) } if multilinePatternKey != "" { multilinePattern, err := regexp.Compile(multilinePatternKey) if err != nil { return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) } return multilinePattern, nil } return nil, nil } // Maps strftime format strings to regex var strftimeToRegex = map[string]string{ /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, /*weekdayZeroIndex */ `%w`: `[0-6]`, /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, /*yearCentury */ `%Y`: `\d{4}`, /*yearZeroPadded */ `%y`: `\d{2}`, /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, /*AM or PM */ `%p`: "[A,P]M", /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, /*secondZeroPadded */ `%S`: `[0-5][0-9]`, /*microsecondZeroPadded */ `%f`: `\d{6}`, /*utcOffset */ `%z`: `[+-]\d{4}`, /*tzName */ `%Z`: `[A-Z]{1,4}T`, /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, /*milliseconds */ `%L`: `\.\d{3}`, } // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() (regionFinder, error) { s, err := session.NewSession() if err != nil { return nil, err } return ec2metadata.New(s), nil } // newSDKEndpoint is a variable such that the implementation // can be swapped out for unit tests. var newSDKEndpoint = credentialsEndpoint // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(info logger.Info) (api, error) { var region, endpoint *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) } if info.Config[regionKey] != "" { region = aws.String(info.Config[regionKey]) } if info.Config[endpointKey] != "" { endpoint = aws.String(info.Config[endpointKey]) } if region == nil || *region == "" { logrus.Info("Trying to get region from EC2 Metadata") ec2MetadataClient, err := newRegionFinder() if err != nil { logrus.WithError(err).Error("could not create EC2 metadata client") return nil, errors.Wrap(err, "could not create EC2 metadata client") } r, err := ec2MetadataClient.Region() if err != nil { logrus.WithError(err).Error("Could not get region from EC2 metadata, environment, or log option") return nil, errors.New("Cannot determine region for awslogs driver") } region = &r } sess, err := session.NewSession() if err != nil { return nil, errors.New("Failed to create a service client session for awslogs driver") } // attach region to cloudwatchlogs config sess.Config.Region = region // attach endpoint to cloudwatchlogs config if endpoint != nil { sess.Config.Endpoint = endpoint } if uri, ok := info.Config[credentialsEndpointKey]; ok { logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }) // attach credentials to cloudwatchlogs config sess.Config.Credentials = creds } logrus.WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") client := cloudwatchlogs.New(sess) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", Fn: func(r *request.Request) { currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) r.HTTPRequest.Header.Set(userAgentHeader, fmt.Sprintf("Docker %s (%s) %s", dockerversion.Version, runtime.GOOS, currentAgent)) }, }) return client, nil } // Name returns the name of the awslogs logging driver func (l *logStream) Name() string { return name } // BufSize returns the maximum bytes CloudWatch can handle. 
func (l *logStream) BufSize() int { return maximumBytesPerEvent } // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() if l.closed { return errors.New("awslogs is closed") } if l.logNonBlocking { select { case l.messages <- msg: return nil default: return errors.New("awslogs buffer is full") } } l.messages <- msg return nil } // Close closes the instance of the awslogs logging driver func (l *logStream) Close() error { l.lock.Lock() defer l.lock.Unlock() if !l.closed { close(l.messages) } l.closed = true return nil } // create creates log group and log stream for the instance of the awslogs logging driver func (l *logStream) create() error { err := l.createLogStream() if err == nil { return nil } if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode && l.logCreateGroup { if err := l.createLogGroup(); err != nil { return errors.Wrap(err, "failed to create Cloudwatch log group") } err = l.createLogStream() if err == nil { return nil } } return errors.Wrap(err, "failed to create Cloudwatch log stream") } // createLogGroup creates a log group for the instance of the awslogs logging driver func (l *logStream) createLogGroup() error { if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ LogGroupName: aws.String(l.logGroupName), }); err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logCreateGroup": l.logCreateGroup, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log group already exists") return nil } logrus.WithFields(fields).Error("Failed to create log group") } return err } return nil } // createLogStream creates a log stream for the instance of the awslogs logging driver func (l *logStream) createLogStream() error { // Directly return if we do not want to create log stream. if !l.logCreateStream { logrus.WithFields(logrus.Fields{ "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, "logCreateStream": l.logCreateStream, }).Info("Skipping creating log stream") return nil } input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } _, err := l.client.CreateLogStream(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log stream already exists") return nil } logrus.WithFields(fields).Error("Failed to create log stream") } } return err } // newTicker is used for time-based batching. newTicker is a variable such // that the implementation can be swapped out for unit tests. var newTicker = func(freq time.Duration) *time.Ticker { return time.NewTicker(freq) } // collectBatch executes as a goroutine to perform batching of log events for // submission to the log stream. 
If the awslogs-multiline-pattern or // awslogs-datetime-format options have been configured, multiline processing // is enabled, where log messages are stored in an event buffer until a multiline // pattern match is found, at which point the messages in the event buffer are // pushed to CloudWatch logs as a single log event. Multiline messages are processed // according to the maximumBytesPerPut constraint, and the implementation only // allows for messages to be buffered for a maximum of 2*batchPublishFrequency // seconds. When events are ready to be processed for submission to CloudWatch // Logs, the processEvents method is called. If a multiline pattern is not // configured, log events are submitted to the processEvents method immediately. func (l *logStream) collectBatch(created chan bool) { // Wait for the logstream/group to be created <-created flushInterval := l.forceFlushInterval if flushInterval <= 0 { flushInterval = defaultForceFlushInterval } ticker := newTicker(flushInterval) var eventBuffer []byte var eventBufferTimestamp int64 var batch = newEventBatch() for { select { case t := <-ticker.C: // If event buffer is older than batch publish frequency flush the event buffer if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp eventBufferExpired := eventBufferAge >= int64(flushInterval)/int64(time.Millisecond) eventBufferNegative := eventBufferAge < 0 if eventBufferExpired || eventBufferNegative { l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBuffer = eventBuffer[:0] } } l.publishBatch(batch) batch.reset() case msg, more := <-l.messages: if !more { // Flush event buffer and release resources l.processEvent(batch, eventBuffer, eventBufferTimestamp) l.publishBatch(batch) batch.reset() return } if eventBufferTimestamp == 0 { eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) } line := msg.Line if l.multilinePattern != nil { lineEffectiveLen := effectiveLen(string(line)) if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent { // This is a new log event or we will exceed max bytes per event // so flush the current eventBuffer to events and reset timestamp l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) eventBuffer = eventBuffer[:0] } // Append newline if event is less than max event size if lineEffectiveLen < maximumBytesPerEvent { line = append(line, "\n"...) } eventBuffer = append(eventBuffer, line...) logger.PutMessage(msg) } else { l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) logger.PutMessage(msg) } } } } // processEvent processes log events that are ready for submission to CloudWatch // logs. Batching is performed on time- and size-bases. Time-based batching // occurs at a 5 second interval (defined in the batchPublishFrequency const). // Size-based batching is performed on the maximum number of events per batch // (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a // batch (defined in maximumBytesPerPut). Log messages are split by the maximum // bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event // byte overhead (defined in perEventBytes) which is accounted for in split- and // batch-calculations. 
Because the events are interpreted as UTF-8 encoded // Unicode, invalid UTF-8 byte sequences are replaced with the Unicode // replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To // compensate for that and to avoid splitting valid UTF-8 characters into // invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) { for len(bytes) > 0 { // Split line length so it does not exceed the maximum splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent) line := bytes[:splitOffset] event := wrappedEvent{ inputLogEvent: &cloudwatchlogs.InputLogEvent{ Message: aws.String(string(line)), Timestamp: aws.Int64(timestamp), }, insertOrder: batch.count(), } added := batch.add(event, lineBytes) if added { bytes = bytes[splitOffset:] } else { l.publishBatch(batch) batch.reset() } } } // effectiveLen counts the effective number of bytes in the string, after // UTF-8 normalization. UTF-8 normalization includes replacing bytes that do // not constitute valid UTF-8 encoded Unicode codepoints with the Unicode // replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as // utf8.RuneError) func effectiveLen(line string) int { effectiveBytes := 0 for _, rune := range line { effectiveBytes += utf8.RuneLen(rune) } return effectiveBytes } // findValidSplit finds the byte offset to split a string without breaking valid // Unicode codepoints given a maximum number of total bytes. findValidSplit // returns the byte offset for splitting a string or []byte, as well as the // effective number of bytes if the string were normalized to replace invalid // UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8 // sequence, represented in Go as utf8.RuneError) func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) { for offset, rune := range line { splitOffset = offset if effectiveBytes+utf8.RuneLen(rune) > maxBytes { return splitOffset, effectiveBytes } effectiveBytes += utf8.RuneLen(rune) } splitOffset = len(line) return } // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). 
func (l *logStream) publishBatch(batch *eventBatch) { if batch.isEmpty() { return } cwEvents := unwrapEvents(batch.events()) nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dataAlreadyAcceptedCode { // already submitted, just grab the correct sequence token parts := strings.Split(awsErr.Message(), " ") nextSequenceToken = &parts[len(parts)-1] logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Info("Data already accepted, ignoring error") err = nil } else if awsErr.Code() == invalidSequenceTokenCode { // sequence code is bad, grab the correct one and retry parts := strings.Split(awsErr.Message(), " ") token := parts[len(parts)-1] nextSequenceToken, err = l.putLogEvents(cwEvents, &token) } } } if err != nil { logrus.Error(err) } else { l.sequenceToken = nextSequenceToken } } // putLogEvents wraps the PutLogEvents API func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: events, SequenceToken: sequenceToken, LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } resp, err := l.client.PutLogEvents(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Error("Failed to put log events") } return nil, err } return resp.NextSequenceToken, nil } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, awslogs-endpoint // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, // awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: case logCreateGroupKey: case regionKey: case endpointKey: case tagKey: case datetimeFormatKey: case multilinePatternKey: case credentialsEndpointKey: case forceFlushIntervalKey: case maxBufferedEventsKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } } if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } if cfg[logCreateGroupKey] != "" { if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) } } if cfg[forceFlushIntervalKey] != "" { if value, err := strconv.Atoi(cfg[forceFlushIntervalKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", forceFlushIntervalKey, cfg[forceFlushIntervalKey]) } } if cfg[maxBufferedEventsKey] != "" { if value, err := strconv.Atoi(cfg[maxBufferedEventsKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", maxBufferedEventsKey, cfg[maxBufferedEventsKey]) } } _, datetimeFormatKeyExists := cfg[datetimeFormatKey] _, multilinePatternKeyExists := cfg[multilinePatternKey] if datetimeFormatKeyExists && multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) } return nil } // Len returns the length of a byTimestamp slice. Len is required by the // sort.Interface interface. 
func (slice byTimestamp) Len() int { return len(slice) } // Less compares two values in a byTimestamp slice by Timestamp. Less is // required by the sort.Interface interface. func (slice byTimestamp) Less(i, j int) bool { iTimestamp, jTimestamp := int64(0), int64(0) if slice != nil && slice[i].inputLogEvent.Timestamp != nil { iTimestamp = *slice[i].inputLogEvent.Timestamp } if slice != nil && slice[j].inputLogEvent.Timestamp != nil { jTimestamp = *slice[j].inputLogEvent.Timestamp } if iTimestamp == jTimestamp { return slice[i].insertOrder < slice[j].insertOrder } return iTimestamp < jTimestamp } // Swap swaps two values in a byTimestamp slice with each other. Swap is // required by the sort.Interface interface. func (slice byTimestamp) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) for i, input := range events { cwEvents[i] = input.inputLogEvent } return cwEvents } func newEventBatch() *eventBatch { return &eventBatch{ batch: make([]wrappedEvent, 0), bytes: 0, } } // events returns a slice of wrappedEvents sorted in order of their // timestamps and then by their insertion order (see `byTimestamp`). // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) events() []wrappedEvent { sort.Sort(byTimestamp(b.batch)) return b.batch } // add adds an event to the batch of events accounting for the // necessary overhead for an event to be logged. An error will be // returned if the event cannot be added to the batch due to service // limits. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) add(event wrappedEvent, size int) bool { addBytes := size + perEventBytes // verify we are still within service limits switch { case len(b.batch)+1 > maximumLogEventsPerPut: return false case b.bytes+addBytes > maximumBytesPerPut: return false } b.bytes += addBytes b.batch = append(b.batch, event) return true } // count is the number of batched events. Warning: this method // is not threadsafe and must not be used concurrently. func (b *eventBatch) count() int { return len(b.batch) } // size is the total number of bytes that the batch represents. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) size() int { return b.bytes } func (b *eventBatch) isEmpty() bool { zeroEvents := b.count() == 0 zeroSize := b.size() == 0 return zeroEvents && zeroSize } // reset prepares the batch for reuse. func (b *eventBatch) reset() { b.bytes = 0 b.batch = b.batch[:0] }
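As an aside, the multiline behaviour configured by `parseMultilineOptions` and the `strftimeToRegex` table above is easiest to see with a worked example: an `awslogs-datetime-format` value is translated token by token into a regular expression, and any log line matching that expression starts a new CloudWatch event while non-matching lines are appended to the buffered event. The snippet below is a standalone sketch reusing a few entries copied from the driver's conversion table; it is not part of the driver itself.

```go
package main

import (
	"fmt"
	"regexp"
)

// A subset of the driver's strftimeToRegex table, copied from above.
var strftimeToRegex = map[string]string{
	`%Y`: `\d{4}`,
	`%m`: `(?:0[1-9]|1[0-2])`,
	`%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`,
}

func main() {
	dateTimeFormat := "%Y-%m-%d" // e.g. --log-opt awslogs-datetime-format='%Y-%m-%d'

	// Same translation the driver performs: each %X token is swapped for its regex.
	r := regexp.MustCompile("%.")
	pattern := r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string {
		return strftimeToRegex[s]
	})
	multilinePattern := regexp.MustCompile(pattern)

	// A line matching the date pattern begins a new CloudWatch event; the
	// continuation line below would be appended to the buffered event instead.
	fmt.Println(multilinePattern.MatchString("2021-09-10 starting container")) // true
	fmt.Println(multilinePattern.MatchString("    at main.run(main.go:42)"))   // false
}
```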
// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( "fmt" "os" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( name = "awslogs" regionKey = "awslogs-region" endpointKey = "awslogs-endpoint" regionEnvKey = "AWS_REGION" logGroupKey = "awslogs-group" logStreamKey = "awslogs-stream" logCreateGroupKey = "awslogs-create-group" logCreateStreamKey = "awslogs-create-stream" tagKey = "tag" datetimeFormatKey = "awslogs-datetime-format" multilinePatternKey = "awslogs-multiline-pattern" credentialsEndpointKey = "awslogs-credentials-endpoint" forceFlushIntervalKey = "awslogs-force-flush-interval-seconds" maxBufferedEventsKey = "awslogs-max-buffered-events" logFormatKey = "awslogs-format" defaultForceFlushInterval = 5 * time.Second defaultMaxBufferedEvents = 4096 // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 26 maximumBytesPerPut = 1048576 maximumLogEventsPerPut = 10000 // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html // Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the // Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To compensate for that and to avoid // splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. 
maximumBytesPerEvent = 262144 - perEventBytes resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" resourceNotFoundCode = "ResourceNotFoundException" credentialsEndpoint = "http://169.254.170.2" userAgentHeader = "User-Agent" // See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html logsFormatHeader = "x-amzn-logs-format" jsonEmfLogFormat = "json/emf" ) type logStream struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration multilinePattern *regexp.Regexp client api messages chan *logger.Message lock sync.RWMutex closed bool sequenceToken *string } type logStreamConfig struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration maxBufferedEvents int multilinePattern *regexp.Regexp } var _ logger.SizedLogger = &logStream{} type api interface { CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } type regionFinder interface { Region() (string, error) } type wrappedEvent struct { inputLogEvent *cloudwatchlogs.InputLogEvent insertOrder int } type byTimestamp []wrappedEvent // init registers the awslogs driver func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // eventBatch holds the events that are batched for submission and the // associated data about it. // // Warning: this type is not threadsafe and must not be used // concurrently. This type is expected to be consumed in a single go // routine and never concurrently. type eventBatch struct { batch []wrappedEvent bytes int } // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, // awslogs-endpoint, awslogs-group, awslogs-stream, awslogs-create-group, // awslogs-multiline-pattern and awslogs-datetime-format. // When available, configuration is also taken from environment variables // AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, the shared credentials // file (~/.aws/credentials), and the EC2 Instance Metadata Service. 
func New(info logger.Info) (logger.Logger, error) { containerStreamConfig, err := newStreamConfig(info) if err != nil { return nil, err } client, err := newAWSLogsClient(info) if err != nil { return nil, err } containerStream := &logStream{ logStreamName: containerStreamConfig.logStreamName, logGroupName: containerStreamConfig.logGroupName, logCreateGroup: containerStreamConfig.logCreateGroup, logCreateStream: containerStreamConfig.logCreateStream, logNonBlocking: containerStreamConfig.logNonBlocking, forceFlushInterval: containerStreamConfig.forceFlushInterval, multilinePattern: containerStreamConfig.multilinePattern, client: client, messages: make(chan *logger.Message, containerStreamConfig.maxBufferedEvents), } creationDone := make(chan bool) if containerStream.logNonBlocking { go func() { backoff := 1 maxBackoff := 32 for { // If logger is closed we are done containerStream.lock.RLock() if containerStream.closed { containerStream.lock.RUnlock() break } containerStream.lock.RUnlock() err := containerStream.create() if err == nil { break } time.Sleep(time.Duration(backoff) * time.Second) if backoff < maxBackoff { backoff *= 2 } logrus. WithError(err). WithField("container-id", info.ContainerID). WithField("container-name", info.ContainerName). Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") } close(creationDone) }() } else { if err = containerStream.create(); err != nil { return nil, err } close(creationDone) } go containerStream.collectBatch(creationDone) return containerStream, nil } // Parses most of the awslogs- options and prepares a config object to be used for newing the actual stream // It has been formed out to ease Utest of the New above func newStreamConfig(info logger.Info) (*logStreamConfig, error) { logGroupName := info.Config[logGroupKey] logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") if err != nil { return nil, err } logCreateGroup := false if info.Config[logCreateGroupKey] != "" { logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) if err != nil { return nil, err } } logNonBlocking := info.Config["mode"] == "non-blocking" forceFlushInterval := defaultForceFlushInterval if info.Config[forceFlushIntervalKey] != "" { forceFlushIntervalAsInt, err := strconv.Atoi(info.Config[forceFlushIntervalKey]) if err != nil { return nil, err } forceFlushInterval = time.Duration(forceFlushIntervalAsInt) * time.Second } maxBufferedEvents := int(defaultMaxBufferedEvents) if info.Config[maxBufferedEventsKey] != "" { maxBufferedEvents, err = strconv.Atoi(info.Config[maxBufferedEventsKey]) if err != nil { return nil, err } } if info.Config[logStreamKey] != "" { logStreamName = info.Config[logStreamKey] } logCreateStream := true if info.Config[logCreateStreamKey] != "" { logCreateStream, err = strconv.ParseBool(info.Config[logCreateStreamKey]) if err != nil { return nil, err } } multilinePattern, err := parseMultilineOptions(info) if err != nil { return nil, err } containerStreamConfig := &logStreamConfig{ logStreamName: logStreamName, logGroupName: logGroupName, logCreateGroup: logCreateGroup, logCreateStream: logCreateStream, logNonBlocking: logNonBlocking, forceFlushInterval: forceFlushInterval, maxBufferedEvents: maxBufferedEvents, multilinePattern: multilinePattern, } return containerStreamConfig, nil } // Parses awslogs-multiline-pattern and awslogs-datetime-format options // If awslogs-datetime-format is present, convert the format from strftime // to regexp and return. 
// If awslogs-multiline-pattern is present, compile regexp and return func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { dateTimeFormat := info.Config[datetimeFormatKey] multilinePatternKey := info.Config[multilinePatternKey] // strftime input is parsed into a regular expression if dateTimeFormat != "" { // %. matches each strftime format sequence and ReplaceAllStringFunc // looks up each format sequence in the conversion table strftimeToRegex // to replace with a defined regular expression r := regexp.MustCompile("%.") multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { return strftimeToRegex[s] }) } if multilinePatternKey != "" { multilinePattern, err := regexp.Compile(multilinePatternKey) if err != nil { return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) } return multilinePattern, nil } return nil, nil } // Maps strftime format strings to regex var strftimeToRegex = map[string]string{ /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, /*weekdayZeroIndex */ `%w`: `[0-6]`, /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, /*yearCentury */ `%Y`: `\d{4}`, /*yearZeroPadded */ `%y`: `\d{2}`, /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, /*AM or PM */ `%p`: "[A,P]M", /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, /*secondZeroPadded */ `%S`: `[0-5][0-9]`, /*microsecondZeroPadded */ `%f`: `\d{6}`, /*utcOffset */ `%z`: `[+-]\d{4}`, /*tzName */ `%Z`: `[A-Z]{1,4}T`, /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, /*milliseconds */ `%L`: `\.\d{3}`, } // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() (regionFinder, error) { s, err := session.NewSession() if err != nil { return nil, err } return ec2metadata.New(s), nil } // newSDKEndpoint is a variable such that the implementation // can be swapped out for unit tests. var newSDKEndpoint = credentialsEndpoint // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(info logger.Info) (api, error) { var region, endpoint *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) } if info.Config[regionKey] != "" { region = aws.String(info.Config[regionKey]) } if info.Config[endpointKey] != "" { endpoint = aws.String(info.Config[endpointKey]) } if region == nil || *region == "" { logrus.Info("Trying to get region from EC2 Metadata") ec2MetadataClient, err := newRegionFinder() if err != nil { logrus.WithError(err).Error("could not create EC2 metadata client") return nil, errors.Wrap(err, "could not create EC2 metadata client") } r, err := ec2MetadataClient.Region() if err != nil { logrus.WithError(err).Error("Could not get region from EC2 metadata, environment, or log option") return nil, errors.New("Cannot determine region for awslogs driver") } region = &r } sess, err := session.NewSession() if err != nil { return nil, errors.New("Failed to create a service client session for awslogs driver") } // attach region to cloudwatchlogs config sess.Config.Region = region // attach endpoint to cloudwatchlogs config if endpoint != nil { sess.Config.Endpoint = endpoint } if uri, ok := info.Config[credentialsEndpointKey]; ok { logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }) // attach credentials to cloudwatchlogs config sess.Config.Credentials = creds } logrus.WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") client := cloudwatchlogs.New(sess) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", Fn: func(r *request.Request) { currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) r.HTTPRequest.Header.Set(userAgentHeader, fmt.Sprintf("Docker %s (%s) %s", dockerversion.Version, runtime.GOOS, currentAgent)) }, }) if info.Config[logFormatKey] != "" { client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "LogFormatHeaderHandler", Fn: func(req *request.Request) { req.HTTPRequest.Header.Set(logsFormatHeader, info.Config[logFormatKey]) }, }) } return client, nil } // Name returns the name of the awslogs logging driver func (l *logStream) Name() string { return name } // BufSize returns the maximum bytes CloudWatch can handle. 
func (l *logStream) BufSize() int { return maximumBytesPerEvent } // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() if l.closed { return errors.New("awslogs is closed") } if l.logNonBlocking { select { case l.messages <- msg: return nil default: return errors.New("awslogs buffer is full") } } l.messages <- msg return nil } // Close closes the instance of the awslogs logging driver func (l *logStream) Close() error { l.lock.Lock() defer l.lock.Unlock() if !l.closed { close(l.messages) } l.closed = true return nil } // create creates log group and log stream for the instance of the awslogs logging driver func (l *logStream) create() error { err := l.createLogStream() if err == nil { return nil } if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode && l.logCreateGroup { if err := l.createLogGroup(); err != nil { return errors.Wrap(err, "failed to create Cloudwatch log group") } err = l.createLogStream() if err == nil { return nil } } return errors.Wrap(err, "failed to create Cloudwatch log stream") } // createLogGroup creates a log group for the instance of the awslogs logging driver func (l *logStream) createLogGroup() error { if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ LogGroupName: aws.String(l.logGroupName), }); err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logCreateGroup": l.logCreateGroup, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log group already exists") return nil } logrus.WithFields(fields).Error("Failed to create log group") } return err } return nil } // createLogStream creates a log stream for the instance of the awslogs logging driver func (l *logStream) createLogStream() error { // Directly return if we do not want to create log stream. if !l.logCreateStream { logrus.WithFields(logrus.Fields{ "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, "logCreateStream": l.logCreateStream, }).Info("Skipping creating log stream") return nil } input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } _, err := l.client.CreateLogStream(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log stream already exists") return nil } logrus.WithFields(fields).Error("Failed to create log stream") } } return err } // newTicker is used for time-based batching. newTicker is a variable such // that the implementation can be swapped out for unit tests. var newTicker = func(freq time.Duration) *time.Ticker { return time.NewTicker(freq) } // collectBatch executes as a goroutine to perform batching of log events for // submission to the log stream. 
If the awslogs-multiline-pattern or // awslogs-datetime-format options have been configured, multiline processing // is enabled, where log messages are stored in an event buffer until a multiline // pattern match is found, at which point the messages in the event buffer are // pushed to CloudWatch logs as a single log event. Multiline messages are processed // according to the maximumBytesPerPut constraint, and the implementation only // allows for messages to be buffered for a maximum of 2*batchPublishFrequency // seconds. When events are ready to be processed for submission to CloudWatch // Logs, the processEvents method is called. If a multiline pattern is not // configured, log events are submitted to the processEvents method immediately. func (l *logStream) collectBatch(created chan bool) { // Wait for the logstream/group to be created <-created flushInterval := l.forceFlushInterval if flushInterval <= 0 { flushInterval = defaultForceFlushInterval } ticker := newTicker(flushInterval) var eventBuffer []byte var eventBufferTimestamp int64 var batch = newEventBatch() for { select { case t := <-ticker.C: // If event buffer is older than batch publish frequency flush the event buffer if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp eventBufferExpired := eventBufferAge >= int64(flushInterval)/int64(time.Millisecond) eventBufferNegative := eventBufferAge < 0 if eventBufferExpired || eventBufferNegative { l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBuffer = eventBuffer[:0] } } l.publishBatch(batch) batch.reset() case msg, more := <-l.messages: if !more { // Flush event buffer and release resources l.processEvent(batch, eventBuffer, eventBufferTimestamp) l.publishBatch(batch) batch.reset() return } if eventBufferTimestamp == 0 { eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) } line := msg.Line if l.multilinePattern != nil { lineEffectiveLen := effectiveLen(string(line)) if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent { // This is a new log event or we will exceed max bytes per event // so flush the current eventBuffer to events and reset timestamp l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) eventBuffer = eventBuffer[:0] } // Append newline if event is less than max event size if lineEffectiveLen < maximumBytesPerEvent { line = append(line, "\n"...) } eventBuffer = append(eventBuffer, line...) logger.PutMessage(msg) } else { l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) logger.PutMessage(msg) } } } } // processEvent processes log events that are ready for submission to CloudWatch // logs. Batching is performed on time- and size-bases. Time-based batching // occurs at a 5 second interval (defined in the batchPublishFrequency const). // Size-based batching is performed on the maximum number of events per batch // (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a // batch (defined in maximumBytesPerPut). Log messages are split by the maximum // bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event // byte overhead (defined in perEventBytes) which is accounted for in split- and // batch-calculations. 
Because the events are interpreted as UTF-8 encoded // Unicode, invalid UTF-8 byte sequences are replaced with the Unicode // replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To // compensate for that and to avoid splitting valid UTF-8 characters into // invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) { for len(bytes) > 0 { // Split line length so it does not exceed the maximum splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent) line := bytes[:splitOffset] event := wrappedEvent{ inputLogEvent: &cloudwatchlogs.InputLogEvent{ Message: aws.String(string(line)), Timestamp: aws.Int64(timestamp), }, insertOrder: batch.count(), } added := batch.add(event, lineBytes) if added { bytes = bytes[splitOffset:] } else { l.publishBatch(batch) batch.reset() } } } // effectiveLen counts the effective number of bytes in the string, after // UTF-8 normalization. UTF-8 normalization includes replacing bytes that do // not constitute valid UTF-8 encoded Unicode codepoints with the Unicode // replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as // utf8.RuneError) func effectiveLen(line string) int { effectiveBytes := 0 for _, rune := range line { effectiveBytes += utf8.RuneLen(rune) } return effectiveBytes } // findValidSplit finds the byte offset to split a string without breaking valid // Unicode codepoints given a maximum number of total bytes. findValidSplit // returns the byte offset for splitting a string or []byte, as well as the // effective number of bytes if the string were normalized to replace invalid // UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8 // sequence, represented in Go as utf8.RuneError) func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) { for offset, rune := range line { splitOffset = offset if effectiveBytes+utf8.RuneLen(rune) > maxBytes { return splitOffset, effectiveBytes } effectiveBytes += utf8.RuneLen(rune) } splitOffset = len(line) return } // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). 
func (l *logStream) publishBatch(batch *eventBatch) { if batch.isEmpty() { return } cwEvents := unwrapEvents(batch.events()) nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dataAlreadyAcceptedCode { // already submitted, just grab the correct sequence token parts := strings.Split(awsErr.Message(), " ") nextSequenceToken = &parts[len(parts)-1] logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Info("Data already accepted, ignoring error") err = nil } else if awsErr.Code() == invalidSequenceTokenCode { // sequence code is bad, grab the correct one and retry parts := strings.Split(awsErr.Message(), " ") token := parts[len(parts)-1] nextSequenceToken, err = l.putLogEvents(cwEvents, &token) } } } if err != nil { logrus.Error(err) } else { l.sequenceToken = nextSequenceToken } } // putLogEvents wraps the PutLogEvents API func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: events, SequenceToken: sequenceToken, LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } resp, err := l.client.PutLogEvents(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Error("Failed to put log events") } return nil, err } return resp.NextSequenceToken, nil } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, awslogs-endpoint // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, // awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: case logCreateGroupKey: case regionKey: case endpointKey: case tagKey: case datetimeFormatKey: case multilinePatternKey: case credentialsEndpointKey: case forceFlushIntervalKey: case maxBufferedEventsKey: case logFormatKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } } if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } if cfg[logCreateGroupKey] != "" { if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) } } if cfg[forceFlushIntervalKey] != "" { if value, err := strconv.Atoi(cfg[forceFlushIntervalKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", forceFlushIntervalKey, cfg[forceFlushIntervalKey]) } } if cfg[maxBufferedEventsKey] != "" { if value, err := strconv.Atoi(cfg[maxBufferedEventsKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", maxBufferedEventsKey, cfg[maxBufferedEventsKey]) } } _, datetimeFormatKeyExists := cfg[datetimeFormatKey] _, multilinePatternKeyExists := cfg[multilinePatternKey] if datetimeFormatKeyExists && multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) } if cfg[logFormatKey] != "" { // For now, only the "json/emf" log format is supported if cfg[logFormatKey] != 
jsonEmfLogFormat { return fmt.Errorf("unsupported log format '%s'", cfg[logFormatKey]) } if datetimeFormatKeyExists || multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' or '%s' when log opt '%s' is set to '%s'", datetimeFormatKey, multilinePatternKey, logFormatKey, jsonEmfLogFormat) } } return nil } // Len returns the length of a byTimestamp slice. Len is required by the // sort.Interface interface. func (slice byTimestamp) Len() int { return len(slice) } // Less compares two values in a byTimestamp slice by Timestamp. Less is // required by the sort.Interface interface. func (slice byTimestamp) Less(i, j int) bool { iTimestamp, jTimestamp := int64(0), int64(0) if slice != nil && slice[i].inputLogEvent.Timestamp != nil { iTimestamp = *slice[i].inputLogEvent.Timestamp } if slice != nil && slice[j].inputLogEvent.Timestamp != nil { jTimestamp = *slice[j].inputLogEvent.Timestamp } if iTimestamp == jTimestamp { return slice[i].insertOrder < slice[j].insertOrder } return iTimestamp < jTimestamp } // Swap swaps two values in a byTimestamp slice with each other. Swap is // required by the sort.Interface interface. func (slice byTimestamp) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) for i, input := range events { cwEvents[i] = input.inputLogEvent } return cwEvents } func newEventBatch() *eventBatch { return &eventBatch{ batch: make([]wrappedEvent, 0), bytes: 0, } } // events returns a slice of wrappedEvents sorted in order of their // timestamps and then by their insertion order (see `byTimestamp`). // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) events() []wrappedEvent { sort.Sort(byTimestamp(b.batch)) return b.batch } // add adds an event to the batch of events accounting for the // necessary overhead for an event to be logged. An error will be // returned if the event cannot be added to the batch due to service // limits. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) add(event wrappedEvent, size int) bool { addBytes := size + perEventBytes // verify we are still within service limits switch { case len(b.batch)+1 > maximumLogEventsPerPut: return false case b.bytes+addBytes > maximumBytesPerPut: return false } b.bytes += addBytes b.batch = append(b.batch, event) return true } // count is the number of batched events. Warning: this method // is not threadsafe and must not be used concurrently. func (b *eventBatch) count() int { return len(b.batch) } // size is the total number of bytes that the batch represents. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) size() int { return b.bytes } func (b *eventBatch) isEmpty() bool { zeroEvents := b.count() == 0 zeroSize := b.size() == 0 return zeroEvents && zeroSize } // reset prepares the batch for reuse. func (b *eventBatch) reset() { b.bytes = 0 b.batch = b.batch[:0] }
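The ValidateLogOpt changes visible in the file above accept only `json/emf` as a value for `awslogs-format` and reject combining it with the multiline-related options. The sketch below shows how those rules could be exercised from a test; it is illustrative only and not part of the package's real test suite, though it uses the exported ValidateLogOpt function exactly as defined above.

```go
package awslogs_test

import (
	"testing"

	"github.com/docker/docker/daemon/logger/awslogs"
)

func TestLogFormatValidationSketch(t *testing.T) {
	// json/emf is the only accepted value for awslogs-format.
	if err := awslogs.ValidateLogOpt(map[string]string{
		"awslogs-group":  "my-group",
		"awslogs-format": "json/emf",
	}); err != nil {
		t.Errorf("expected json/emf to be accepted, got: %v", err)
	}

	// Any other format value is rejected as unsupported.
	if err := awslogs.ValidateLogOpt(map[string]string{
		"awslogs-group":  "my-group",
		"awslogs-format": "random",
	}); err == nil {
		t.Error("expected an error for an unsupported log format")
	}

	// awslogs-format=json/emf cannot be combined with the multiline options.
	if err := awslogs.ValidateLogOpt(map[string]string{
		"awslogs-group":           "my-group",
		"awslogs-format":          "json/emf",
		"awslogs-datetime-format": "%Y-%m-%d",
	}); err == nil {
		t.Error("expected an error when combining json/emf with awslogs-datetime-format")
	}
}
```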
sanjams2
b46ab1f57997feed98aa2cccd8e4f5a612b963bc
787b8fe14f34f04f723bd0913f293a59be428fe6
I think it'd be worth changing the name of the option to remove the repetition. ```suggestion logFormatKey = "awslogs-format" ```
samuelkarp
4,448
moby/moby
42,838
Add an option to specify log format for awslogs driver
Added an option 'awslogs-format' to allow specifying a log format for the logs sent to CloudWatch from the aws log driver. For now, only the 'json/emf' format is supported. If no option is provided, the log format header in the request to CloudWatch will be omitted as before.

Signed-off-by: James Sanders <[email protected]>

**- What I did**

- Added an option `awslogs-format` to allow specifying a log format for the logs sent to CloudWatch from the aws log driver.
- For now, only the `json/emf` format is supported.
- If no option is provided, the log format header in the request to CloudWatch will be omitted as before.
- This addresses https://github.com/moby/moby/issues/42731
- Users cannot provide the `awslogs-datetime-format` or the `awslogs-multiline-pattern` options when the `awslogs-format` option is set to `json/emf`. This is because both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options modify the log line delimiter. Given that, for now, the `json/emf` format must be a valid JSON line, it does not make sense to allow for different line delimiters when using `json/emf`.

**- How I did it**

- Modified the `awslogs` log driver to add a new CloudWatch client "handler" (basically middleware) that adds the `x-amzn-logs-format` header to the request when the log format option is given and is `json/emf`.
- Tried to follow convention at every step of the way.
- Based the key name on the one used in the [fluentd plugin](https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatch)

**- How to verify it**

- Unit Testing
- Manual integration testing:
  - Ran a container with no format option provided and verified logs were still published (no regression)
  - Ran a container in a development environment with `--log-opt awslogs-format=json/emf` enabled and saw metrics posted to CloudWatch:

    ```bash
    docker run --rm \
      --log-driver awslogs \
      --log-opt awslogs-region=us-west-2 \
      --log-opt awslogs-group=emf-logger-test-container \
      --log-opt awslogs-create-group=true \
      --log-opt awslogs-format=json/emf \
      emf-logger \
      run
    ```

  - Metrics in CloudWatch:

    <img width="1091" alt="image" src="https://user-images.githubusercontent.com/10730172/132904614-a4c05775-a495-449c-9468-308edf9a0aa6.png">

  - Attempted to specify a "random" log format and saw that an error was thrown:

    ```bash
    docker run --rm \
      --log-driver awslogs \
      --log-opt awslogs-region=us-west-2 \
      --log-opt awslogs-group=emf-logger-test-container \
      --log-opt awslogs-create-group=true \
      --log-opt awslogs-format=random \
      emf-logger \
      run
    docker: Error response from daemon: unsupported log format 'random'.
    ```

  - Attempted to set `awslogs-datetime-format` and saw that an error was thrown:

    ```bash
    docker run --rm \
      --log-driver awslogs \
      --log-opt awslogs-region=us-west-2 \
      --log-opt awslogs-group=emf-logger-test-container \
      --log-opt awslogs-create-group=true \
      --log-opt awslogs-format='json/emf' \
      --log-opt awslogs-datetime-format='YYYY-mm-dd' \
      emf-logger \
      serve
    docker: Error response from daemon: you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'.
    ```

  - Attempted to set `awslogs-multiline-pattern` and saw that an error was thrown:

    ```bash
    docker run --rm \
      --log-driver awslogs \
      --log-opt awslogs-region=us-west-2 \
      --log-opt awslogs-group=emf-logger-test-container \
      --log-opt awslogs-create-group=true \
      --log-opt awslogs-format='json/emf' \
      --log-opt awslogs-multiline-pattern='-------' \
      emf-logger \
      serve
    docker: Error response from daemon: you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'.
    ```

**- Description for the changelog**

Added a new option to the `awslogs` log driver to specify the log format that is sent to CloudWatch.

**- A picture of a cute animal (not mandatory but encouraged)**

![image](https://user-images.githubusercontent.com/10730172/132908691-81dd1525-5133-4ac2-938e-b1e7d84e30c9.png)
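The "handler" approach described above mirrors the pattern the driver already uses for its Docker User-Agent header: a named handler is pushed onto the CloudWatch Logs client's Build handler list so every outgoing request carries the extra header. The following is a minimal standalone sketch of that pattern with aws-sdk-go v1, not the driver's exact code; the header name and value match the ones this PR adds, while the function name is illustrative.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// newEMFClient builds a CloudWatch Logs client whose requests always carry
// the x-amzn-logs-format header, so events are treated as embedded metric format.
func newEMFClient() (*cloudwatchlogs.CloudWatchLogs, error) {
	sess, err := session.NewSession()
	if err != nil {
		return nil, err
	}
	client := cloudwatchlogs.New(sess)
	// Every request built by this client gets the log-format header.
	client.Handlers.Build.PushBackNamed(request.NamedHandler{
		Name: "LogFormatHeaderHandler",
		Fn: func(r *request.Request) {
			r.HTTPRequest.Header.Set("x-amzn-logs-format", "json/emf")
		},
	})
	return client, nil
}

func main() {
	if _, err := newEMFClient(); err != nil {
		panic(err)
	}
}
```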
null
2021-09-10 20:09:50+00:00
2021-12-02 19:48:07+00:00
daemon/logger/awslogs/cloudwatchlogs.go
// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( "fmt" "os" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( name = "awslogs" regionKey = "awslogs-region" endpointKey = "awslogs-endpoint" regionEnvKey = "AWS_REGION" logGroupKey = "awslogs-group" logStreamKey = "awslogs-stream" logCreateGroupKey = "awslogs-create-group" logCreateStreamKey = "awslogs-create-stream" tagKey = "tag" datetimeFormatKey = "awslogs-datetime-format" multilinePatternKey = "awslogs-multiline-pattern" credentialsEndpointKey = "awslogs-credentials-endpoint" forceFlushIntervalKey = "awslogs-force-flush-interval-seconds" maxBufferedEventsKey = "awslogs-max-buffered-events" defaultForceFlushInterval = 5 * time.Second defaultMaxBufferedEvents = 4096 // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 26 maximumBytesPerPut = 1048576 maximumLogEventsPerPut = 10000 // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html // Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the // Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To compensate for that and to avoid // splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. 
maximumBytesPerEvent = 262144 - perEventBytes resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" resourceNotFoundCode = "ResourceNotFoundException" credentialsEndpoint = "http://169.254.170.2" userAgentHeader = "User-Agent" ) type logStream struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration multilinePattern *regexp.Regexp client api messages chan *logger.Message lock sync.RWMutex closed bool sequenceToken *string } type logStreamConfig struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration maxBufferedEvents int multilinePattern *regexp.Regexp } var _ logger.SizedLogger = &logStream{} type api interface { CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } type regionFinder interface { Region() (string, error) } type wrappedEvent struct { inputLogEvent *cloudwatchlogs.InputLogEvent insertOrder int } type byTimestamp []wrappedEvent // init registers the awslogs driver func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // eventBatch holds the events that are batched for submission and the // associated data about it. // // Warning: this type is not threadsafe and must not be used // concurrently. This type is expected to be consumed in a single go // routine and never concurrently. type eventBatch struct { batch []wrappedEvent bytes int } // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, // awslogs-endpoint, awslogs-group, awslogs-stream, awslogs-create-group, // awslogs-multiline-pattern and awslogs-datetime-format. // When available, configuration is also taken from environment variables // AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, the shared credentials // file (~/.aws/credentials), and the EC2 Instance Metadata Service. 
func New(info logger.Info) (logger.Logger, error) { containerStreamConfig, err := newStreamConfig(info) if err != nil { return nil, err } client, err := newAWSLogsClient(info) if err != nil { return nil, err } containerStream := &logStream{ logStreamName: containerStreamConfig.logStreamName, logGroupName: containerStreamConfig.logGroupName, logCreateGroup: containerStreamConfig.logCreateGroup, logCreateStream: containerStreamConfig.logCreateStream, logNonBlocking: containerStreamConfig.logNonBlocking, forceFlushInterval: containerStreamConfig.forceFlushInterval, multilinePattern: containerStreamConfig.multilinePattern, client: client, messages: make(chan *logger.Message, containerStreamConfig.maxBufferedEvents), } creationDone := make(chan bool) if containerStream.logNonBlocking { go func() { backoff := 1 maxBackoff := 32 for { // If logger is closed we are done containerStream.lock.RLock() if containerStream.closed { containerStream.lock.RUnlock() break } containerStream.lock.RUnlock() err := containerStream.create() if err == nil { break } time.Sleep(time.Duration(backoff) * time.Second) if backoff < maxBackoff { backoff *= 2 } logrus. WithError(err). WithField("container-id", info.ContainerID). WithField("container-name", info.ContainerName). Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") } close(creationDone) }() } else { if err = containerStream.create(); err != nil { return nil, err } close(creationDone) } go containerStream.collectBatch(creationDone) return containerStream, nil } // Parses most of the awslogs- options and prepares a config object to be used for newing the actual stream // It has been formed out to ease Utest of the New above func newStreamConfig(info logger.Info) (*logStreamConfig, error) { logGroupName := info.Config[logGroupKey] logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") if err != nil { return nil, err } logCreateGroup := false if info.Config[logCreateGroupKey] != "" { logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) if err != nil { return nil, err } } logNonBlocking := info.Config["mode"] == "non-blocking" forceFlushInterval := defaultForceFlushInterval if info.Config[forceFlushIntervalKey] != "" { forceFlushIntervalAsInt, err := strconv.Atoi(info.Config[forceFlushIntervalKey]) if err != nil { return nil, err } forceFlushInterval = time.Duration(forceFlushIntervalAsInt) * time.Second } maxBufferedEvents := int(defaultMaxBufferedEvents) if info.Config[maxBufferedEventsKey] != "" { maxBufferedEvents, err = strconv.Atoi(info.Config[maxBufferedEventsKey]) if err != nil { return nil, err } } if info.Config[logStreamKey] != "" { logStreamName = info.Config[logStreamKey] } logCreateStream := true if info.Config[logCreateStreamKey] != "" { logCreateStream, err = strconv.ParseBool(info.Config[logCreateStreamKey]) if err != nil { return nil, err } } multilinePattern, err := parseMultilineOptions(info) if err != nil { return nil, err } containerStreamConfig := &logStreamConfig{ logStreamName: logStreamName, logGroupName: logGroupName, logCreateGroup: logCreateGroup, logCreateStream: logCreateStream, logNonBlocking: logNonBlocking, forceFlushInterval: forceFlushInterval, maxBufferedEvents: maxBufferedEvents, multilinePattern: multilinePattern, } return containerStreamConfig, nil } // Parses awslogs-multiline-pattern and awslogs-datetime-format options // If awslogs-datetime-format is present, convert the format from strftime // to regexp and return. 
// If awslogs-multiline-pattern is present, compile regexp and return func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { dateTimeFormat := info.Config[datetimeFormatKey] multilinePatternKey := info.Config[multilinePatternKey] // strftime input is parsed into a regular expression if dateTimeFormat != "" { // %. matches each strftime format sequence and ReplaceAllStringFunc // looks up each format sequence in the conversion table strftimeToRegex // to replace with a defined regular expression r := regexp.MustCompile("%.") multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { return strftimeToRegex[s] }) } if multilinePatternKey != "" { multilinePattern, err := regexp.Compile(multilinePatternKey) if err != nil { return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) } return multilinePattern, nil } return nil, nil } // Maps strftime format strings to regex var strftimeToRegex = map[string]string{ /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, /*weekdayZeroIndex */ `%w`: `[0-6]`, /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, /*yearCentury */ `%Y`: `\d{4}`, /*yearZeroPadded */ `%y`: `\d{2}`, /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, /*AM or PM */ `%p`: "[A,P]M", /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, /*secondZeroPadded */ `%S`: `[0-5][0-9]`, /*microsecondZeroPadded */ `%f`: `\d{6}`, /*utcOffset */ `%z`: `[+-]\d{4}`, /*tzName */ `%Z`: `[A-Z]{1,4}T`, /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, /*milliseconds */ `%L`: `\.\d{3}`, } // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() (regionFinder, error) { s, err := session.NewSession() if err != nil { return nil, err } return ec2metadata.New(s), nil } // newSDKEndpoint is a variable such that the implementation // can be swapped out for unit tests. var newSDKEndpoint = credentialsEndpoint // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(info logger.Info) (api, error) { var region, endpoint *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) } if info.Config[regionKey] != "" { region = aws.String(info.Config[regionKey]) } if info.Config[endpointKey] != "" { endpoint = aws.String(info.Config[endpointKey]) } if region == nil || *region == "" { logrus.Info("Trying to get region from EC2 Metadata") ec2MetadataClient, err := newRegionFinder() if err != nil { logrus.WithError(err).Error("could not create EC2 metadata client") return nil, errors.Wrap(err, "could not create EC2 metadata client") } r, err := ec2MetadataClient.Region() if err != nil { logrus.WithError(err).Error("Could not get region from EC2 metadata, environment, or log option") return nil, errors.New("Cannot determine region for awslogs driver") } region = &r } sess, err := session.NewSession() if err != nil { return nil, errors.New("Failed to create a service client session for awslogs driver") } // attach region to cloudwatchlogs config sess.Config.Region = region // attach endpoint to cloudwatchlogs config if endpoint != nil { sess.Config.Endpoint = endpoint } if uri, ok := info.Config[credentialsEndpointKey]; ok { logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }) // attach credentials to cloudwatchlogs config sess.Config.Credentials = creds } logrus.WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") client := cloudwatchlogs.New(sess) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", Fn: func(r *request.Request) { currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) r.HTTPRequest.Header.Set(userAgentHeader, fmt.Sprintf("Docker %s (%s) %s", dockerversion.Version, runtime.GOOS, currentAgent)) }, }) return client, nil } // Name returns the name of the awslogs logging driver func (l *logStream) Name() string { return name } // BufSize returns the maximum bytes CloudWatch can handle. 
func (l *logStream) BufSize() int { return maximumBytesPerEvent } // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() if l.closed { return errors.New("awslogs is closed") } if l.logNonBlocking { select { case l.messages <- msg: return nil default: return errors.New("awslogs buffer is full") } } l.messages <- msg return nil } // Close closes the instance of the awslogs logging driver func (l *logStream) Close() error { l.lock.Lock() defer l.lock.Unlock() if !l.closed { close(l.messages) } l.closed = true return nil } // create creates log group and log stream for the instance of the awslogs logging driver func (l *logStream) create() error { err := l.createLogStream() if err == nil { return nil } if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode && l.logCreateGroup { if err := l.createLogGroup(); err != nil { return errors.Wrap(err, "failed to create Cloudwatch log group") } err = l.createLogStream() if err == nil { return nil } } return errors.Wrap(err, "failed to create Cloudwatch log stream") } // createLogGroup creates a log group for the instance of the awslogs logging driver func (l *logStream) createLogGroup() error { if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ LogGroupName: aws.String(l.logGroupName), }); err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logCreateGroup": l.logCreateGroup, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log group already exists") return nil } logrus.WithFields(fields).Error("Failed to create log group") } return err } return nil } // createLogStream creates a log stream for the instance of the awslogs logging driver func (l *logStream) createLogStream() error { // Directly return if we do not want to create log stream. if !l.logCreateStream { logrus.WithFields(logrus.Fields{ "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, "logCreateStream": l.logCreateStream, }).Info("Skipping creating log stream") return nil } input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } _, err := l.client.CreateLogStream(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log stream already exists") return nil } logrus.WithFields(fields).Error("Failed to create log stream") } } return err } // newTicker is used for time-based batching. newTicker is a variable such // that the implementation can be swapped out for unit tests. var newTicker = func(freq time.Duration) *time.Ticker { return time.NewTicker(freq) } // collectBatch executes as a goroutine to perform batching of log events for // submission to the log stream. 
If the awslogs-multiline-pattern or // awslogs-datetime-format options have been configured, multiline processing // is enabled, where log messages are stored in an event buffer until a multiline // pattern match is found, at which point the messages in the event buffer are // pushed to CloudWatch logs as a single log event. Multiline messages are processed // according to the maximumBytesPerPut constraint, and the implementation only // allows for messages to be buffered for a maximum of 2*batchPublishFrequency // seconds. When events are ready to be processed for submission to CloudWatch // Logs, the processEvents method is called. If a multiline pattern is not // configured, log events are submitted to the processEvents method immediately. func (l *logStream) collectBatch(created chan bool) { // Wait for the logstream/group to be created <-created flushInterval := l.forceFlushInterval if flushInterval <= 0 { flushInterval = defaultForceFlushInterval } ticker := newTicker(flushInterval) var eventBuffer []byte var eventBufferTimestamp int64 var batch = newEventBatch() for { select { case t := <-ticker.C: // If event buffer is older than batch publish frequency flush the event buffer if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp eventBufferExpired := eventBufferAge >= int64(flushInterval)/int64(time.Millisecond) eventBufferNegative := eventBufferAge < 0 if eventBufferExpired || eventBufferNegative { l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBuffer = eventBuffer[:0] } } l.publishBatch(batch) batch.reset() case msg, more := <-l.messages: if !more { // Flush event buffer and release resources l.processEvent(batch, eventBuffer, eventBufferTimestamp) l.publishBatch(batch) batch.reset() return } if eventBufferTimestamp == 0 { eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) } line := msg.Line if l.multilinePattern != nil { lineEffectiveLen := effectiveLen(string(line)) if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent { // This is a new log event or we will exceed max bytes per event // so flush the current eventBuffer to events and reset timestamp l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) eventBuffer = eventBuffer[:0] } // Append newline if event is less than max event size if lineEffectiveLen < maximumBytesPerEvent { line = append(line, "\n"...) } eventBuffer = append(eventBuffer, line...) logger.PutMessage(msg) } else { l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) logger.PutMessage(msg) } } } } // processEvent processes log events that are ready for submission to CloudWatch // logs. Batching is performed on time- and size-bases. Time-based batching // occurs at a 5 second interval (defined in the batchPublishFrequency const). // Size-based batching is performed on the maximum number of events per batch // (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a // batch (defined in maximumBytesPerPut). Log messages are split by the maximum // bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event // byte overhead (defined in perEventBytes) which is accounted for in split- and // batch-calculations. 
Because the events are interpreted as UTF-8 encoded // Unicode, invalid UTF-8 byte sequences are replaced with the Unicode // replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To // compensate for that and to avoid splitting valid UTF-8 characters into // invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) { for len(bytes) > 0 { // Split line length so it does not exceed the maximum splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent) line := bytes[:splitOffset] event := wrappedEvent{ inputLogEvent: &cloudwatchlogs.InputLogEvent{ Message: aws.String(string(line)), Timestamp: aws.Int64(timestamp), }, insertOrder: batch.count(), } added := batch.add(event, lineBytes) if added { bytes = bytes[splitOffset:] } else { l.publishBatch(batch) batch.reset() } } } // effectiveLen counts the effective number of bytes in the string, after // UTF-8 normalization. UTF-8 normalization includes replacing bytes that do // not constitute valid UTF-8 encoded Unicode codepoints with the Unicode // replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as // utf8.RuneError) func effectiveLen(line string) int { effectiveBytes := 0 for _, rune := range line { effectiveBytes += utf8.RuneLen(rune) } return effectiveBytes } // findValidSplit finds the byte offset to split a string without breaking valid // Unicode codepoints given a maximum number of total bytes. findValidSplit // returns the byte offset for splitting a string or []byte, as well as the // effective number of bytes if the string were normalized to replace invalid // UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8 // sequence, represented in Go as utf8.RuneError) func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) { for offset, rune := range line { splitOffset = offset if effectiveBytes+utf8.RuneLen(rune) > maxBytes { return splitOffset, effectiveBytes } effectiveBytes += utf8.RuneLen(rune) } splitOffset = len(line) return } // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). 
func (l *logStream) publishBatch(batch *eventBatch) { if batch.isEmpty() { return } cwEvents := unwrapEvents(batch.events()) nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dataAlreadyAcceptedCode { // already submitted, just grab the correct sequence token parts := strings.Split(awsErr.Message(), " ") nextSequenceToken = &parts[len(parts)-1] logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Info("Data already accepted, ignoring error") err = nil } else if awsErr.Code() == invalidSequenceTokenCode { // sequence code is bad, grab the correct one and retry parts := strings.Split(awsErr.Message(), " ") token := parts[len(parts)-1] nextSequenceToken, err = l.putLogEvents(cwEvents, &token) } } } if err != nil { logrus.Error(err) } else { l.sequenceToken = nextSequenceToken } } // putLogEvents wraps the PutLogEvents API func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: events, SequenceToken: sequenceToken, LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } resp, err := l.client.PutLogEvents(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Error("Failed to put log events") } return nil, err } return resp.NextSequenceToken, nil } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, awslogs-endpoint // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, // awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: case logCreateGroupKey: case regionKey: case endpointKey: case tagKey: case datetimeFormatKey: case multilinePatternKey: case credentialsEndpointKey: case forceFlushIntervalKey: case maxBufferedEventsKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } } if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } if cfg[logCreateGroupKey] != "" { if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) } } if cfg[forceFlushIntervalKey] != "" { if value, err := strconv.Atoi(cfg[forceFlushIntervalKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", forceFlushIntervalKey, cfg[forceFlushIntervalKey]) } } if cfg[maxBufferedEventsKey] != "" { if value, err := strconv.Atoi(cfg[maxBufferedEventsKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", maxBufferedEventsKey, cfg[maxBufferedEventsKey]) } } _, datetimeFormatKeyExists := cfg[datetimeFormatKey] _, multilinePatternKeyExists := cfg[multilinePatternKey] if datetimeFormatKeyExists && multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) } return nil } // Len returns the length of a byTimestamp slice. Len is required by the // sort.Interface interface. 
func (slice byTimestamp) Len() int { return len(slice) } // Less compares two values in a byTimestamp slice by Timestamp. Less is // required by the sort.Interface interface. func (slice byTimestamp) Less(i, j int) bool { iTimestamp, jTimestamp := int64(0), int64(0) if slice != nil && slice[i].inputLogEvent.Timestamp != nil { iTimestamp = *slice[i].inputLogEvent.Timestamp } if slice != nil && slice[j].inputLogEvent.Timestamp != nil { jTimestamp = *slice[j].inputLogEvent.Timestamp } if iTimestamp == jTimestamp { return slice[i].insertOrder < slice[j].insertOrder } return iTimestamp < jTimestamp } // Swap swaps two values in a byTimestamp slice with each other. Swap is // required by the sort.Interface interface. func (slice byTimestamp) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) for i, input := range events { cwEvents[i] = input.inputLogEvent } return cwEvents } func newEventBatch() *eventBatch { return &eventBatch{ batch: make([]wrappedEvent, 0), bytes: 0, } } // events returns a slice of wrappedEvents sorted in order of their // timestamps and then by their insertion order (see `byTimestamp`). // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) events() []wrappedEvent { sort.Sort(byTimestamp(b.batch)) return b.batch } // add adds an event to the batch of events accounting for the // necessary overhead for an event to be logged. An error will be // returned if the event cannot be added to the batch due to service // limits. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) add(event wrappedEvent, size int) bool { addBytes := size + perEventBytes // verify we are still within service limits switch { case len(b.batch)+1 > maximumLogEventsPerPut: return false case b.bytes+addBytes > maximumBytesPerPut: return false } b.bytes += addBytes b.batch = append(b.batch, event) return true } // count is the number of batched events. Warning: this method // is not threadsafe and must not be used concurrently. func (b *eventBatch) count() int { return len(b.batch) } // size is the total number of bytes that the batch represents. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) size() int { return b.bytes } func (b *eventBatch) isEmpty() bool { zeroEvents := b.count() == 0 zeroSize := b.size() == 0 return zeroEvents && zeroSize } // reset prepares the batch for reuse. func (b *eventBatch) reset() { b.bytes = 0 b.batch = b.batch[:0] }
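The splitting logic documented in the file above measures each message as if invalid UTF-8 bytes had already been replaced with the 3-byte replacement character U+FFFD, so a split never produces an over-long event after CloudWatch performs that substitution. Below is a small standalone illustration of that length accounting, included as an editorial sketch independent of the package; only the standard library is used.

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// effectiveLen mirrors the driver's accounting: iterate over runes and sum
// their UTF-8 lengths, so each invalid byte counts as the 3-byte replacement
// character U+FFFD rather than as a single byte.
func effectiveLen(line string) int {
	n := 0
	for _, r := range line {
		n += utf8.RuneLen(r)
	}
	return n
}

func main() {
	valid := "héllo"      // 6 raw bytes, all valid UTF-8
	invalid := "h\xffllo" // 5 raw bytes, but \xff is replaced by U+FFFD

	fmt.Println(len(valid), effectiveLen(valid))     // 6 6
	fmt.Println(len(invalid), effectiveLen(invalid)) // 5 7
}
```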
// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( "fmt" "os" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( name = "awslogs" regionKey = "awslogs-region" endpointKey = "awslogs-endpoint" regionEnvKey = "AWS_REGION" logGroupKey = "awslogs-group" logStreamKey = "awslogs-stream" logCreateGroupKey = "awslogs-create-group" logCreateStreamKey = "awslogs-create-stream" tagKey = "tag" datetimeFormatKey = "awslogs-datetime-format" multilinePatternKey = "awslogs-multiline-pattern" credentialsEndpointKey = "awslogs-credentials-endpoint" forceFlushIntervalKey = "awslogs-force-flush-interval-seconds" maxBufferedEventsKey = "awslogs-max-buffered-events" logFormatKey = "awslogs-format" defaultForceFlushInterval = 5 * time.Second defaultMaxBufferedEvents = 4096 // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 26 maximumBytesPerPut = 1048576 maximumLogEventsPerPut = 10000 // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html // Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the // Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To compensate for that and to avoid // splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. 
maximumBytesPerEvent = 262144 - perEventBytes resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" resourceNotFoundCode = "ResourceNotFoundException" credentialsEndpoint = "http://169.254.170.2" userAgentHeader = "User-Agent" // See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html logsFormatHeader = "x-amzn-logs-format" jsonEmfLogFormat = "json/emf" ) type logStream struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration multilinePattern *regexp.Regexp client api messages chan *logger.Message lock sync.RWMutex closed bool sequenceToken *string } type logStreamConfig struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration maxBufferedEvents int multilinePattern *regexp.Regexp } var _ logger.SizedLogger = &logStream{} type api interface { CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } type regionFinder interface { Region() (string, error) } type wrappedEvent struct { inputLogEvent *cloudwatchlogs.InputLogEvent insertOrder int } type byTimestamp []wrappedEvent // init registers the awslogs driver func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // eventBatch holds the events that are batched for submission and the // associated data about it. // // Warning: this type is not threadsafe and must not be used // concurrently. This type is expected to be consumed in a single go // routine and never concurrently. type eventBatch struct { batch []wrappedEvent bytes int } // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, // awslogs-endpoint, awslogs-group, awslogs-stream, awslogs-create-group, // awslogs-multiline-pattern and awslogs-datetime-format. // When available, configuration is also taken from environment variables // AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, the shared credentials // file (~/.aws/credentials), and the EC2 Instance Metadata Service. 
func New(info logger.Info) (logger.Logger, error) { containerStreamConfig, err := newStreamConfig(info) if err != nil { return nil, err } client, err := newAWSLogsClient(info) if err != nil { return nil, err } containerStream := &logStream{ logStreamName: containerStreamConfig.logStreamName, logGroupName: containerStreamConfig.logGroupName, logCreateGroup: containerStreamConfig.logCreateGroup, logCreateStream: containerStreamConfig.logCreateStream, logNonBlocking: containerStreamConfig.logNonBlocking, forceFlushInterval: containerStreamConfig.forceFlushInterval, multilinePattern: containerStreamConfig.multilinePattern, client: client, messages: make(chan *logger.Message, containerStreamConfig.maxBufferedEvents), } creationDone := make(chan bool) if containerStream.logNonBlocking { go func() { backoff := 1 maxBackoff := 32 for { // If logger is closed we are done containerStream.lock.RLock() if containerStream.closed { containerStream.lock.RUnlock() break } containerStream.lock.RUnlock() err := containerStream.create() if err == nil { break } time.Sleep(time.Duration(backoff) * time.Second) if backoff < maxBackoff { backoff *= 2 } logrus. WithError(err). WithField("container-id", info.ContainerID). WithField("container-name", info.ContainerName). Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") } close(creationDone) }() } else { if err = containerStream.create(); err != nil { return nil, err } close(creationDone) } go containerStream.collectBatch(creationDone) return containerStream, nil } // Parses most of the awslogs- options and prepares a config object to be used for newing the actual stream // It has been formed out to ease Utest of the New above func newStreamConfig(info logger.Info) (*logStreamConfig, error) { logGroupName := info.Config[logGroupKey] logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") if err != nil { return nil, err } logCreateGroup := false if info.Config[logCreateGroupKey] != "" { logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) if err != nil { return nil, err } } logNonBlocking := info.Config["mode"] == "non-blocking" forceFlushInterval := defaultForceFlushInterval if info.Config[forceFlushIntervalKey] != "" { forceFlushIntervalAsInt, err := strconv.Atoi(info.Config[forceFlushIntervalKey]) if err != nil { return nil, err } forceFlushInterval = time.Duration(forceFlushIntervalAsInt) * time.Second } maxBufferedEvents := int(defaultMaxBufferedEvents) if info.Config[maxBufferedEventsKey] != "" { maxBufferedEvents, err = strconv.Atoi(info.Config[maxBufferedEventsKey]) if err != nil { return nil, err } } if info.Config[logStreamKey] != "" { logStreamName = info.Config[logStreamKey] } logCreateStream := true if info.Config[logCreateStreamKey] != "" { logCreateStream, err = strconv.ParseBool(info.Config[logCreateStreamKey]) if err != nil { return nil, err } } multilinePattern, err := parseMultilineOptions(info) if err != nil { return nil, err } containerStreamConfig := &logStreamConfig{ logStreamName: logStreamName, logGroupName: logGroupName, logCreateGroup: logCreateGroup, logCreateStream: logCreateStream, logNonBlocking: logNonBlocking, forceFlushInterval: forceFlushInterval, maxBufferedEvents: maxBufferedEvents, multilinePattern: multilinePattern, } return containerStreamConfig, nil } // Parses awslogs-multiline-pattern and awslogs-datetime-format options // If awslogs-datetime-format is present, convert the format from strftime // to regexp and return. 
// If awslogs-multiline-pattern is present, compile regexp and return func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { dateTimeFormat := info.Config[datetimeFormatKey] multilinePatternKey := info.Config[multilinePatternKey] // strftime input is parsed into a regular expression if dateTimeFormat != "" { // %. matches each strftime format sequence and ReplaceAllStringFunc // looks up each format sequence in the conversion table strftimeToRegex // to replace with a defined regular expression r := regexp.MustCompile("%.") multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { return strftimeToRegex[s] }) } if multilinePatternKey != "" { multilinePattern, err := regexp.Compile(multilinePatternKey) if err != nil { return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) } return multilinePattern, nil } return nil, nil } // Maps strftime format strings to regex var strftimeToRegex = map[string]string{ /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, /*weekdayZeroIndex */ `%w`: `[0-6]`, /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, /*yearCentury */ `%Y`: `\d{4}`, /*yearZeroPadded */ `%y`: `\d{2}`, /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, /*AM or PM */ `%p`: "[A,P]M", /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, /*secondZeroPadded */ `%S`: `[0-5][0-9]`, /*microsecondZeroPadded */ `%f`: `\d{6}`, /*utcOffset */ `%z`: `[+-]\d{4}`, /*tzName */ `%Z`: `[A-Z]{1,4}T`, /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, /*milliseconds */ `%L`: `\.\d{3}`, } // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() (regionFinder, error) { s, err := session.NewSession() if err != nil { return nil, err } return ec2metadata.New(s), nil } // newSDKEndpoint is a variable such that the implementation // can be swapped out for unit tests. var newSDKEndpoint = credentialsEndpoint // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(info logger.Info) (api, error) { var region, endpoint *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) } if info.Config[regionKey] != "" { region = aws.String(info.Config[regionKey]) } if info.Config[endpointKey] != "" { endpoint = aws.String(info.Config[endpointKey]) } if region == nil || *region == "" { logrus.Info("Trying to get region from EC2 Metadata") ec2MetadataClient, err := newRegionFinder() if err != nil { logrus.WithError(err).Error("could not create EC2 metadata client") return nil, errors.Wrap(err, "could not create EC2 metadata client") } r, err := ec2MetadataClient.Region() if err != nil { logrus.WithError(err).Error("Could not get region from EC2 metadata, environment, or log option") return nil, errors.New("Cannot determine region for awslogs driver") } region = &r } sess, err := session.NewSession() if err != nil { return nil, errors.New("Failed to create a service client session for awslogs driver") } // attach region to cloudwatchlogs config sess.Config.Region = region // attach endpoint to cloudwatchlogs config if endpoint != nil { sess.Config.Endpoint = endpoint } if uri, ok := info.Config[credentialsEndpointKey]; ok { logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }) // attach credentials to cloudwatchlogs config sess.Config.Credentials = creds } logrus.WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") client := cloudwatchlogs.New(sess) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", Fn: func(r *request.Request) { currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) r.HTTPRequest.Header.Set(userAgentHeader, fmt.Sprintf("Docker %s (%s) %s", dockerversion.Version, runtime.GOOS, currentAgent)) }, }) if info.Config[logFormatKey] != "" { client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "LogFormatHeaderHandler", Fn: func(req *request.Request) { req.HTTPRequest.Header.Set(logsFormatHeader, info.Config[logFormatKey]) }, }) } return client, nil } // Name returns the name of the awslogs logging driver func (l *logStream) Name() string { return name } // BufSize returns the maximum bytes CloudWatch can handle. 
func (l *logStream) BufSize() int { return maximumBytesPerEvent } // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() if l.closed { return errors.New("awslogs is closed") } if l.logNonBlocking { select { case l.messages <- msg: return nil default: return errors.New("awslogs buffer is full") } } l.messages <- msg return nil } // Close closes the instance of the awslogs logging driver func (l *logStream) Close() error { l.lock.Lock() defer l.lock.Unlock() if !l.closed { close(l.messages) } l.closed = true return nil } // create creates log group and log stream for the instance of the awslogs logging driver func (l *logStream) create() error { err := l.createLogStream() if err == nil { return nil } if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode && l.logCreateGroup { if err := l.createLogGroup(); err != nil { return errors.Wrap(err, "failed to create Cloudwatch log group") } err = l.createLogStream() if err == nil { return nil } } return errors.Wrap(err, "failed to create Cloudwatch log stream") } // createLogGroup creates a log group for the instance of the awslogs logging driver func (l *logStream) createLogGroup() error { if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ LogGroupName: aws.String(l.logGroupName), }); err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logCreateGroup": l.logCreateGroup, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log group already exists") return nil } logrus.WithFields(fields).Error("Failed to create log group") } return err } return nil } // createLogStream creates a log stream for the instance of the awslogs logging driver func (l *logStream) createLogStream() error { // Directly return if we do not want to create log stream. if !l.logCreateStream { logrus.WithFields(logrus.Fields{ "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, "logCreateStream": l.logCreateStream, }).Info("Skipping creating log stream") return nil } input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } _, err := l.client.CreateLogStream(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log stream already exists") return nil } logrus.WithFields(fields).Error("Failed to create log stream") } } return err } // newTicker is used for time-based batching. newTicker is a variable such // that the implementation can be swapped out for unit tests. var newTicker = func(freq time.Duration) *time.Ticker { return time.NewTicker(freq) } // collectBatch executes as a goroutine to perform batching of log events for // submission to the log stream. 
If the awslogs-multiline-pattern or // awslogs-datetime-format options have been configured, multiline processing // is enabled, where log messages are stored in an event buffer until a multiline // pattern match is found, at which point the messages in the event buffer are // pushed to CloudWatch logs as a single log event. Multiline messages are processed // according to the maximumBytesPerPut constraint, and the implementation only // allows for messages to be buffered for a maximum of 2*batchPublishFrequency // seconds. When events are ready to be processed for submission to CloudWatch // Logs, the processEvents method is called. If a multiline pattern is not // configured, log events are submitted to the processEvents method immediately. func (l *logStream) collectBatch(created chan bool) { // Wait for the logstream/group to be created <-created flushInterval := l.forceFlushInterval if flushInterval <= 0 { flushInterval = defaultForceFlushInterval } ticker := newTicker(flushInterval) var eventBuffer []byte var eventBufferTimestamp int64 var batch = newEventBatch() for { select { case t := <-ticker.C: // If event buffer is older than batch publish frequency flush the event buffer if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp eventBufferExpired := eventBufferAge >= int64(flushInterval)/int64(time.Millisecond) eventBufferNegative := eventBufferAge < 0 if eventBufferExpired || eventBufferNegative { l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBuffer = eventBuffer[:0] } } l.publishBatch(batch) batch.reset() case msg, more := <-l.messages: if !more { // Flush event buffer and release resources l.processEvent(batch, eventBuffer, eventBufferTimestamp) l.publishBatch(batch) batch.reset() return } if eventBufferTimestamp == 0 { eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) } line := msg.Line if l.multilinePattern != nil { lineEffectiveLen := effectiveLen(string(line)) if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent { // This is a new log event or we will exceed max bytes per event // so flush the current eventBuffer to events and reset timestamp l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) eventBuffer = eventBuffer[:0] } // Append newline if event is less than max event size if lineEffectiveLen < maximumBytesPerEvent { line = append(line, "\n"...) } eventBuffer = append(eventBuffer, line...) logger.PutMessage(msg) } else { l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) logger.PutMessage(msg) } } } } // processEvent processes log events that are ready for submission to CloudWatch // logs. Batching is performed on time- and size-bases. Time-based batching // occurs at a 5 second interval (defined in the batchPublishFrequency const). // Size-based batching is performed on the maximum number of events per batch // (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a // batch (defined in maximumBytesPerPut). Log messages are split by the maximum // bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event // byte overhead (defined in perEventBytes) which is accounted for in split- and // batch-calculations. 
Because the events are interpreted as UTF-8 encoded // Unicode, invalid UTF-8 byte sequences are replaced with the Unicode // replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To // compensate for that and to avoid splitting valid UTF-8 characters into // invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) { for len(bytes) > 0 { // Split line length so it does not exceed the maximum splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent) line := bytes[:splitOffset] event := wrappedEvent{ inputLogEvent: &cloudwatchlogs.InputLogEvent{ Message: aws.String(string(line)), Timestamp: aws.Int64(timestamp), }, insertOrder: batch.count(), } added := batch.add(event, lineBytes) if added { bytes = bytes[splitOffset:] } else { l.publishBatch(batch) batch.reset() } } } // effectiveLen counts the effective number of bytes in the string, after // UTF-8 normalization. UTF-8 normalization includes replacing bytes that do // not constitute valid UTF-8 encoded Unicode codepoints with the Unicode // replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as // utf8.RuneError) func effectiveLen(line string) int { effectiveBytes := 0 for _, rune := range line { effectiveBytes += utf8.RuneLen(rune) } return effectiveBytes } // findValidSplit finds the byte offset to split a string without breaking valid // Unicode codepoints given a maximum number of total bytes. findValidSplit // returns the byte offset for splitting a string or []byte, as well as the // effective number of bytes if the string were normalized to replace invalid // UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8 // sequence, represented in Go as utf8.RuneError) func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) { for offset, rune := range line { splitOffset = offset if effectiveBytes+utf8.RuneLen(rune) > maxBytes { return splitOffset, effectiveBytes } effectiveBytes += utf8.RuneLen(rune) } splitOffset = len(line) return } // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). 
func (l *logStream) publishBatch(batch *eventBatch) { if batch.isEmpty() { return } cwEvents := unwrapEvents(batch.events()) nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dataAlreadyAcceptedCode { // already submitted, just grab the correct sequence token parts := strings.Split(awsErr.Message(), " ") nextSequenceToken = &parts[len(parts)-1] logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Info("Data already accepted, ignoring error") err = nil } else if awsErr.Code() == invalidSequenceTokenCode { // sequence code is bad, grab the correct one and retry parts := strings.Split(awsErr.Message(), " ") token := parts[len(parts)-1] nextSequenceToken, err = l.putLogEvents(cwEvents, &token) } } } if err != nil { logrus.Error(err) } else { l.sequenceToken = nextSequenceToken } } // putLogEvents wraps the PutLogEvents API func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: events, SequenceToken: sequenceToken, LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } resp, err := l.client.PutLogEvents(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Error("Failed to put log events") } return nil, err } return resp.NextSequenceToken, nil } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, awslogs-endpoint // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, // awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: case logCreateGroupKey: case regionKey: case endpointKey: case tagKey: case datetimeFormatKey: case multilinePatternKey: case credentialsEndpointKey: case forceFlushIntervalKey: case maxBufferedEventsKey: case logFormatKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } } if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } if cfg[logCreateGroupKey] != "" { if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) } } if cfg[forceFlushIntervalKey] != "" { if value, err := strconv.Atoi(cfg[forceFlushIntervalKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", forceFlushIntervalKey, cfg[forceFlushIntervalKey]) } } if cfg[maxBufferedEventsKey] != "" { if value, err := strconv.Atoi(cfg[maxBufferedEventsKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", maxBufferedEventsKey, cfg[maxBufferedEventsKey]) } } _, datetimeFormatKeyExists := cfg[datetimeFormatKey] _, multilinePatternKeyExists := cfg[multilinePatternKey] if datetimeFormatKeyExists && multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) } if cfg[logFormatKey] != "" { // For now, only the "json/emf" log format is supported if cfg[logFormatKey] != 
jsonEmfLogFormat { return fmt.Errorf("unsupported log format '%s'", cfg[logFormatKey]) } if datetimeFormatKeyExists || multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' or '%s' when log opt '%s' is set to '%s'", datetimeFormatKey, multilinePatternKey, logFormatKey, jsonEmfLogFormat) } } return nil } // Len returns the length of a byTimestamp slice. Len is required by the // sort.Interface interface. func (slice byTimestamp) Len() int { return len(slice) } // Less compares two values in a byTimestamp slice by Timestamp. Less is // required by the sort.Interface interface. func (slice byTimestamp) Less(i, j int) bool { iTimestamp, jTimestamp := int64(0), int64(0) if slice != nil && slice[i].inputLogEvent.Timestamp != nil { iTimestamp = *slice[i].inputLogEvent.Timestamp } if slice != nil && slice[j].inputLogEvent.Timestamp != nil { jTimestamp = *slice[j].inputLogEvent.Timestamp } if iTimestamp == jTimestamp { return slice[i].insertOrder < slice[j].insertOrder } return iTimestamp < jTimestamp } // Swap swaps two values in a byTimestamp slice with each other. Swap is // required by the sort.Interface interface. func (slice byTimestamp) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) for i, input := range events { cwEvents[i] = input.inputLogEvent } return cwEvents } func newEventBatch() *eventBatch { return &eventBatch{ batch: make([]wrappedEvent, 0), bytes: 0, } } // events returns a slice of wrappedEvents sorted in order of their // timestamps and then by their insertion order (see `byTimestamp`). // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) events() []wrappedEvent { sort.Sort(byTimestamp(b.batch)) return b.batch } // add adds an event to the batch of events accounting for the // necessary overhead for an event to be logged. An error will be // returned if the event cannot be added to the batch due to service // limits. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) add(event wrappedEvent, size int) bool { addBytes := size + perEventBytes // verify we are still within service limits switch { case len(b.batch)+1 > maximumLogEventsPerPut: return false case b.bytes+addBytes > maximumBytesPerPut: return false } b.bytes += addBytes b.batch = append(b.batch, event) return true } // count is the number of batched events. Warning: this method // is not threadsafe and must not be used concurrently. func (b *eventBatch) count() int { return len(b.batch) } // size is the total number of bytes that the batch represents. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) size() int { return b.bytes } func (b *eventBatch) isEmpty() bool { zeroEvents := b.count() == 0 zeroSize := b.size() == 0 return zeroEvents && zeroSize } // reset prepares the batch for reuse. func (b *eventBatch) reset() { b.bytes = 0 b.batch = b.batch[:0] }
sanjams2
b46ab1f57997feed98aa2cccd8e4f5a612b963bc
787b8fe14f34f04f723bd0913f293a59be428fe6
I think we should ensure that specifying `json/emf` is mutually-exclusive with specifying `awslogs-datetime-format` or `awslogs-multiline-pattern`. Those options change the default delimiter of a LogEvent from a newline to whatever is specified in those options, which rely on parsing the log message content. While I'm not convinced that it's actually _impossible_ to specify one of those options that would work with the JSON structure of an EMF message, I think it's difficult and a likely source of broken logging rather than the intended behavior.
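For illustration, a minimal, self-contained sketch of the mutual-exclusivity check the reviewer is asking for, modeled on the validation added to ValidateLogOpt in the after_content above; the helper name validateLogFormat and the inlined option-key constants are assumptions made for this sketch only:

```go
package awslogs

import "fmt"

// validateLogFormat is an illustrative helper (not the driver's actual API)
// showing the rule the reviewer describes: awslogs-format may only be
// "json/emf", and it may not be combined with awslogs-datetime-format or
// awslogs-multiline-pattern, since those options change the log event delimiter.
func validateLogFormat(cfg map[string]string) error {
	const (
		logFormatKey        = "awslogs-format"
		datetimeFormatKey   = "awslogs-datetime-format"
		multilinePatternKey = "awslogs-multiline-pattern"
		jsonEmfLogFormat    = "json/emf"
	)
	format := cfg[logFormatKey]
	if format == "" {
		return nil // no format configured: the header is simply omitted
	}
	if format != jsonEmfLogFormat {
		return fmt.Errorf("unsupported log format '%s'", format)
	}
	_, hasDatetime := cfg[datetimeFormatKey]
	_, hasMultiline := cfg[multilinePatternKey]
	if hasDatetime || hasMultiline {
		return fmt.Errorf("you cannot configure log opt '%s' or '%s' when log opt '%s' is set to '%s'",
			datetimeFormatKey, multilinePatternKey, logFormatKey, jsonEmfLogFormat)
	}
	return nil
}
```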
samuelkarp
4,449
moby/moby
42,838
Add an option to specify log format for awslogs driver
Added an option 'awslogs-format' to allow specifying a log format for the logs sent to CloudWatch from the aws log driver. For now, only the 'json/emf' format is supported. If no option is provided, the log format header in the request to CloudWatch will be omitted as before. Signed-off-by: James Sanders <[email protected]> **- What I did** - Added an option `awslogs-format` to allow specifying a log format for the logs sent to CloudWatch from the aws log driver. - For now, only the `json/emf` format is supported. - If no option is provided, the log format header in the request to CloudWatch will be omitted as before. - This addresses https://github.com/moby/moby/issues/42731 - Users cannot provide the `awslogs-datetime-format` or the `awslogs-multiline-pattern` options when the `awslogs-format` option is set to `json/emf`. This is because both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options modify the log line delimiter. Given that, for now, the `json/emf` format must be a valid JSON line, it does not make sense to allow different line delimiters when using `json/emf`. **- How I did it** - Modified the `awslogs` log driver to add a new CloudWatch client "handler" (basically middleware) that adds the `x-amzn-logs-format` header to the request when the log format option is given and is `json/emf`. - Tried to follow convention at every step of the way. - Based the key name on the key name used in the [fluentd plugin](https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatch) **- How to verify it** - Unit Testing - Manual integration testing: - Ran a container with no format option provided and verified logs were still published (no regression) - Ran a container in a development environment with `--log-opt awslogs-format=json/emf` enabled and saw metrics posted to CloudWatch: ```bash docker run --rm \ --log-driver awslogs \ --log-opt awslogs-region=us-west-2 \ --log-opt awslogs-group=emf-logger-test-container \ --log-opt awslogs-create-group=true \ --log-opt awslogs-format=json/emf \ emf-logger \ run ``` - Metrics in CloudWatch: <img width="1091" alt="image" src="https://user-images.githubusercontent.com/10730172/132904614-a4c05775-a495-449c-9468-308edf9a0aa6.png"> - Attempted to specify a "random" log format and saw that an error was thrown ```bash # docker run --rm \ --log-driver awslogs \ --log-opt awslogs-region=us-west-2 \ --log-opt awslogs-group=emf-logger-test-container \ --log-opt awslogs-create-group=true \ --log-opt awslogs-format=random \ emf-logger \ run docker: Error response from daemon: unsupported log format 'random'. ``` - Attempted to set `awslogs-datetime-format` and saw that an error was thrown ```bash docker run --rm \ --log-driver awslogs \ --log-opt awslogs-region=us-west-2 \ --log-opt awslogs-group=emf-logger-test-container \ --log-opt awslogs-create-group=true \ --log-opt awslogs-format='json/emf' \ --log-opt awslogs-datetime-format='YYYY-mm-dd' \ emf-logger \ serve docker: Error response from daemon: you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'. 
``` - Attempted to set `awslogs-multiline-pattern` and saw that an error was thrown ```bash docker run --rm \ --log-driver awslogs \ --log-opt awslogs-region=us-west-2 \ --log-opt awslogs-group=emf-logger-test-container \ --log-opt awslogs-create-group=true \ --log-opt awslogs-format='json/emf' \ --log-opt awslogs-multiline-pattern='-------' \ emf-logger \ serve docker: Error response from daemon: you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'. ``` **- Description for the changelog** Added a new option to the `awslogs` log driver to specify the log format that is sent to CloudWatch. **- A picture of a cute animal (not mandatory but encouraged)** ![image](https://user-images.githubusercontent.com/10730172/132908691-81dd1525-5133-4ac2-938e-b1e7d84e30c9.png)
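For reference, a minimal sketch of the "handler" (middleware) approach described under "How I did it", modeled on the LogFormatHeaderHandler registered in newAWSLogsClient in the after_content above; the standalone main function and the hard-coded format value are assumptions for this sketch, while the header name and the handler registration mirror the driver code:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	const (
		logsFormatHeader = "x-amzn-logs-format" // header consumed by CloudWatch Logs
		logFormat        = "json/emf"           // value that would come from awslogs-format
	)

	sess := session.Must(session.NewSession())
	client := cloudwatchlogs.New(sess)

	// Register a build handler so every outgoing request carries the
	// log-format header; the driver only registers this when the option is set.
	client.Handlers.Build.PushBackNamed(request.NamedHandler{
		Name: "LogFormatHeaderHandler",
		Fn: func(req *request.Request) {
			req.HTTPRequest.Header.Set(logsFormatHeader, logFormat)
		},
	})

	_ = client // subsequent PutLogEvents calls through this client include the header
}
```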
null
2021-09-10 20:09:50+00:00
2021-12-02 19:48:07+00:00
daemon/logger/awslogs/cloudwatchlogs.go
// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( "fmt" "os" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( name = "awslogs" regionKey = "awslogs-region" endpointKey = "awslogs-endpoint" regionEnvKey = "AWS_REGION" logGroupKey = "awslogs-group" logStreamKey = "awslogs-stream" logCreateGroupKey = "awslogs-create-group" logCreateStreamKey = "awslogs-create-stream" tagKey = "tag" datetimeFormatKey = "awslogs-datetime-format" multilinePatternKey = "awslogs-multiline-pattern" credentialsEndpointKey = "awslogs-credentials-endpoint" forceFlushIntervalKey = "awslogs-force-flush-interval-seconds" maxBufferedEventsKey = "awslogs-max-buffered-events" defaultForceFlushInterval = 5 * time.Second defaultMaxBufferedEvents = 4096 // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 26 maximumBytesPerPut = 1048576 maximumLogEventsPerPut = 10000 // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html // Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the // Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To compensate for that and to avoid // splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. 
maximumBytesPerEvent = 262144 - perEventBytes resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" resourceNotFoundCode = "ResourceNotFoundException" credentialsEndpoint = "http://169.254.170.2" userAgentHeader = "User-Agent" ) type logStream struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration multilinePattern *regexp.Regexp client api messages chan *logger.Message lock sync.RWMutex closed bool sequenceToken *string } type logStreamConfig struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration maxBufferedEvents int multilinePattern *regexp.Regexp } var _ logger.SizedLogger = &logStream{} type api interface { CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } type regionFinder interface { Region() (string, error) } type wrappedEvent struct { inputLogEvent *cloudwatchlogs.InputLogEvent insertOrder int } type byTimestamp []wrappedEvent // init registers the awslogs driver func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // eventBatch holds the events that are batched for submission and the // associated data about it. // // Warning: this type is not threadsafe and must not be used // concurrently. This type is expected to be consumed in a single go // routine and never concurrently. type eventBatch struct { batch []wrappedEvent bytes int } // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, // awslogs-endpoint, awslogs-group, awslogs-stream, awslogs-create-group, // awslogs-multiline-pattern and awslogs-datetime-format. // When available, configuration is also taken from environment variables // AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, the shared credentials // file (~/.aws/credentials), and the EC2 Instance Metadata Service. 
func New(info logger.Info) (logger.Logger, error) { containerStreamConfig, err := newStreamConfig(info) if err != nil { return nil, err } client, err := newAWSLogsClient(info) if err != nil { return nil, err } containerStream := &logStream{ logStreamName: containerStreamConfig.logStreamName, logGroupName: containerStreamConfig.logGroupName, logCreateGroup: containerStreamConfig.logCreateGroup, logCreateStream: containerStreamConfig.logCreateStream, logNonBlocking: containerStreamConfig.logNonBlocking, forceFlushInterval: containerStreamConfig.forceFlushInterval, multilinePattern: containerStreamConfig.multilinePattern, client: client, messages: make(chan *logger.Message, containerStreamConfig.maxBufferedEvents), } creationDone := make(chan bool) if containerStream.logNonBlocking { go func() { backoff := 1 maxBackoff := 32 for { // If logger is closed we are done containerStream.lock.RLock() if containerStream.closed { containerStream.lock.RUnlock() break } containerStream.lock.RUnlock() err := containerStream.create() if err == nil { break } time.Sleep(time.Duration(backoff) * time.Second) if backoff < maxBackoff { backoff *= 2 } logrus. WithError(err). WithField("container-id", info.ContainerID). WithField("container-name", info.ContainerName). Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") } close(creationDone) }() } else { if err = containerStream.create(); err != nil { return nil, err } close(creationDone) } go containerStream.collectBatch(creationDone) return containerStream, nil } // Parses most of the awslogs- options and prepares a config object to be used for newing the actual stream // It has been formed out to ease Utest of the New above func newStreamConfig(info logger.Info) (*logStreamConfig, error) { logGroupName := info.Config[logGroupKey] logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") if err != nil { return nil, err } logCreateGroup := false if info.Config[logCreateGroupKey] != "" { logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) if err != nil { return nil, err } } logNonBlocking := info.Config["mode"] == "non-blocking" forceFlushInterval := defaultForceFlushInterval if info.Config[forceFlushIntervalKey] != "" { forceFlushIntervalAsInt, err := strconv.Atoi(info.Config[forceFlushIntervalKey]) if err != nil { return nil, err } forceFlushInterval = time.Duration(forceFlushIntervalAsInt) * time.Second } maxBufferedEvents := int(defaultMaxBufferedEvents) if info.Config[maxBufferedEventsKey] != "" { maxBufferedEvents, err = strconv.Atoi(info.Config[maxBufferedEventsKey]) if err != nil { return nil, err } } if info.Config[logStreamKey] != "" { logStreamName = info.Config[logStreamKey] } logCreateStream := true if info.Config[logCreateStreamKey] != "" { logCreateStream, err = strconv.ParseBool(info.Config[logCreateStreamKey]) if err != nil { return nil, err } } multilinePattern, err := parseMultilineOptions(info) if err != nil { return nil, err } containerStreamConfig := &logStreamConfig{ logStreamName: logStreamName, logGroupName: logGroupName, logCreateGroup: logCreateGroup, logCreateStream: logCreateStream, logNonBlocking: logNonBlocking, forceFlushInterval: forceFlushInterval, maxBufferedEvents: maxBufferedEvents, multilinePattern: multilinePattern, } return containerStreamConfig, nil } // Parses awslogs-multiline-pattern and awslogs-datetime-format options // If awslogs-datetime-format is present, convert the format from strftime // to regexp and return. 
// If awslogs-multiline-pattern is present, compile regexp and return func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { dateTimeFormat := info.Config[datetimeFormatKey] multilinePatternKey := info.Config[multilinePatternKey] // strftime input is parsed into a regular expression if dateTimeFormat != "" { // %. matches each strftime format sequence and ReplaceAllStringFunc // looks up each format sequence in the conversion table strftimeToRegex // to replace with a defined regular expression r := regexp.MustCompile("%.") multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { return strftimeToRegex[s] }) } if multilinePatternKey != "" { multilinePattern, err := regexp.Compile(multilinePatternKey) if err != nil { return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) } return multilinePattern, nil } return nil, nil } // Maps strftime format strings to regex var strftimeToRegex = map[string]string{ /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, /*weekdayZeroIndex */ `%w`: `[0-6]`, /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, /*yearCentury */ `%Y`: `\d{4}`, /*yearZeroPadded */ `%y`: `\d{2}`, /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, /*AM or PM */ `%p`: "[A,P]M", /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, /*secondZeroPadded */ `%S`: `[0-5][0-9]`, /*microsecondZeroPadded */ `%f`: `\d{6}`, /*utcOffset */ `%z`: `[+-]\d{4}`, /*tzName */ `%Z`: `[A-Z]{1,4}T`, /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, /*milliseconds */ `%L`: `\.\d{3}`, } // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() (regionFinder, error) { s, err := session.NewSession() if err != nil { return nil, err } return ec2metadata.New(s), nil } // newSDKEndpoint is a variable such that the implementation // can be swapped out for unit tests. var newSDKEndpoint = credentialsEndpoint // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(info logger.Info) (api, error) { var region, endpoint *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) } if info.Config[regionKey] != "" { region = aws.String(info.Config[regionKey]) } if info.Config[endpointKey] != "" { endpoint = aws.String(info.Config[endpointKey]) } if region == nil || *region == "" { logrus.Info("Trying to get region from EC2 Metadata") ec2MetadataClient, err := newRegionFinder() if err != nil { logrus.WithError(err).Error("could not create EC2 metadata client") return nil, errors.Wrap(err, "could not create EC2 metadata client") } r, err := ec2MetadataClient.Region() if err != nil { logrus.WithError(err).Error("Could not get region from EC2 metadata, environment, or log option") return nil, errors.New("Cannot determine region for awslogs driver") } region = &r } sess, err := session.NewSession() if err != nil { return nil, errors.New("Failed to create a service client session for awslogs driver") } // attach region to cloudwatchlogs config sess.Config.Region = region // attach endpoint to cloudwatchlogs config if endpoint != nil { sess.Config.Endpoint = endpoint } if uri, ok := info.Config[credentialsEndpointKey]; ok { logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }) // attach credentials to cloudwatchlogs config sess.Config.Credentials = creds } logrus.WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") client := cloudwatchlogs.New(sess) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", Fn: func(r *request.Request) { currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) r.HTTPRequest.Header.Set(userAgentHeader, fmt.Sprintf("Docker %s (%s) %s", dockerversion.Version, runtime.GOOS, currentAgent)) }, }) return client, nil } // Name returns the name of the awslogs logging driver func (l *logStream) Name() string { return name } // BufSize returns the maximum bytes CloudWatch can handle. 
func (l *logStream) BufSize() int { return maximumBytesPerEvent } // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() if l.closed { return errors.New("awslogs is closed") } if l.logNonBlocking { select { case l.messages <- msg: return nil default: return errors.New("awslogs buffer is full") } } l.messages <- msg return nil } // Close closes the instance of the awslogs logging driver func (l *logStream) Close() error { l.lock.Lock() defer l.lock.Unlock() if !l.closed { close(l.messages) } l.closed = true return nil } // create creates log group and log stream for the instance of the awslogs logging driver func (l *logStream) create() error { err := l.createLogStream() if err == nil { return nil } if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode && l.logCreateGroup { if err := l.createLogGroup(); err != nil { return errors.Wrap(err, "failed to create Cloudwatch log group") } err = l.createLogStream() if err == nil { return nil } } return errors.Wrap(err, "failed to create Cloudwatch log stream") } // createLogGroup creates a log group for the instance of the awslogs logging driver func (l *logStream) createLogGroup() error { if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ LogGroupName: aws.String(l.logGroupName), }); err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logCreateGroup": l.logCreateGroup, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log group already exists") return nil } logrus.WithFields(fields).Error("Failed to create log group") } return err } return nil } // createLogStream creates a log stream for the instance of the awslogs logging driver func (l *logStream) createLogStream() error { // Directly return if we do not want to create log stream. if !l.logCreateStream { logrus.WithFields(logrus.Fields{ "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, "logCreateStream": l.logCreateStream, }).Info("Skipping creating log stream") return nil } input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } _, err := l.client.CreateLogStream(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log stream already exists") return nil } logrus.WithFields(fields).Error("Failed to create log stream") } } return err } // newTicker is used for time-based batching. newTicker is a variable such // that the implementation can be swapped out for unit tests. var newTicker = func(freq time.Duration) *time.Ticker { return time.NewTicker(freq) } // collectBatch executes as a goroutine to perform batching of log events for // submission to the log stream. 
If the awslogs-multiline-pattern or // awslogs-datetime-format options have been configured, multiline processing // is enabled, where log messages are stored in an event buffer until a multiline // pattern match is found, at which point the messages in the event buffer are // pushed to CloudWatch logs as a single log event. Multiline messages are processed // according to the maximumBytesPerPut constraint, and the implementation only // allows for messages to be buffered for a maximum of 2*batchPublishFrequency // seconds. When events are ready to be processed for submission to CloudWatch // Logs, the processEvents method is called. If a multiline pattern is not // configured, log events are submitted to the processEvents method immediately. func (l *logStream) collectBatch(created chan bool) { // Wait for the logstream/group to be created <-created flushInterval := l.forceFlushInterval if flushInterval <= 0 { flushInterval = defaultForceFlushInterval } ticker := newTicker(flushInterval) var eventBuffer []byte var eventBufferTimestamp int64 var batch = newEventBatch() for { select { case t := <-ticker.C: // If event buffer is older than batch publish frequency flush the event buffer if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp eventBufferExpired := eventBufferAge >= int64(flushInterval)/int64(time.Millisecond) eventBufferNegative := eventBufferAge < 0 if eventBufferExpired || eventBufferNegative { l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBuffer = eventBuffer[:0] } } l.publishBatch(batch) batch.reset() case msg, more := <-l.messages: if !more { // Flush event buffer and release resources l.processEvent(batch, eventBuffer, eventBufferTimestamp) l.publishBatch(batch) batch.reset() return } if eventBufferTimestamp == 0 { eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) } line := msg.Line if l.multilinePattern != nil { lineEffectiveLen := effectiveLen(string(line)) if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent { // This is a new log event or we will exceed max bytes per event // so flush the current eventBuffer to events and reset timestamp l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) eventBuffer = eventBuffer[:0] } // Append newline if event is less than max event size if lineEffectiveLen < maximumBytesPerEvent { line = append(line, "\n"...) } eventBuffer = append(eventBuffer, line...) logger.PutMessage(msg) } else { l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) logger.PutMessage(msg) } } } } // processEvent processes log events that are ready for submission to CloudWatch // logs. Batching is performed on time- and size-bases. Time-based batching // occurs at a 5 second interval (defined in the batchPublishFrequency const). // Size-based batching is performed on the maximum number of events per batch // (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a // batch (defined in maximumBytesPerPut). Log messages are split by the maximum // bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event // byte overhead (defined in perEventBytes) which is accounted for in split- and // batch-calculations. 
Because the events are interpreted as UTF-8 encoded // Unicode, invalid UTF-8 byte sequences are replaced with the Unicode // replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To // compensate for that and to avoid splitting valid UTF-8 characters into // invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) { for len(bytes) > 0 { // Split line length so it does not exceed the maximum splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent) line := bytes[:splitOffset] event := wrappedEvent{ inputLogEvent: &cloudwatchlogs.InputLogEvent{ Message: aws.String(string(line)), Timestamp: aws.Int64(timestamp), }, insertOrder: batch.count(), } added := batch.add(event, lineBytes) if added { bytes = bytes[splitOffset:] } else { l.publishBatch(batch) batch.reset() } } } // effectiveLen counts the effective number of bytes in the string, after // UTF-8 normalization. UTF-8 normalization includes replacing bytes that do // not constitute valid UTF-8 encoded Unicode codepoints with the Unicode // replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as // utf8.RuneError) func effectiveLen(line string) int { effectiveBytes := 0 for _, rune := range line { effectiveBytes += utf8.RuneLen(rune) } return effectiveBytes } // findValidSplit finds the byte offset to split a string without breaking valid // Unicode codepoints given a maximum number of total bytes. findValidSplit // returns the byte offset for splitting a string or []byte, as well as the // effective number of bytes if the string were normalized to replace invalid // UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8 // sequence, represented in Go as utf8.RuneError) func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) { for offset, rune := range line { splitOffset = offset if effectiveBytes+utf8.RuneLen(rune) > maxBytes { return splitOffset, effectiveBytes } effectiveBytes += utf8.RuneLen(rune) } splitOffset = len(line) return } // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). 
func (l *logStream) publishBatch(batch *eventBatch) { if batch.isEmpty() { return } cwEvents := unwrapEvents(batch.events()) nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dataAlreadyAcceptedCode { // already submitted, just grab the correct sequence token parts := strings.Split(awsErr.Message(), " ") nextSequenceToken = &parts[len(parts)-1] logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Info("Data already accepted, ignoring error") err = nil } else if awsErr.Code() == invalidSequenceTokenCode { // sequence code is bad, grab the correct one and retry parts := strings.Split(awsErr.Message(), " ") token := parts[len(parts)-1] nextSequenceToken, err = l.putLogEvents(cwEvents, &token) } } } if err != nil { logrus.Error(err) } else { l.sequenceToken = nextSequenceToken } } // putLogEvents wraps the PutLogEvents API func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: events, SequenceToken: sequenceToken, LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } resp, err := l.client.PutLogEvents(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Error("Failed to put log events") } return nil, err } return resp.NextSequenceToken, nil } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, awslogs-endpoint // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, // awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: case logCreateGroupKey: case regionKey: case endpointKey: case tagKey: case datetimeFormatKey: case multilinePatternKey: case credentialsEndpointKey: case forceFlushIntervalKey: case maxBufferedEventsKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } } if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } if cfg[logCreateGroupKey] != "" { if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) } } if cfg[forceFlushIntervalKey] != "" { if value, err := strconv.Atoi(cfg[forceFlushIntervalKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", forceFlushIntervalKey, cfg[forceFlushIntervalKey]) } } if cfg[maxBufferedEventsKey] != "" { if value, err := strconv.Atoi(cfg[maxBufferedEventsKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", maxBufferedEventsKey, cfg[maxBufferedEventsKey]) } } _, datetimeFormatKeyExists := cfg[datetimeFormatKey] _, multilinePatternKeyExists := cfg[multilinePatternKey] if datetimeFormatKeyExists && multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) } return nil } // Len returns the length of a byTimestamp slice. Len is required by the // sort.Interface interface. 
func (slice byTimestamp) Len() int { return len(slice) } // Less compares two values in a byTimestamp slice by Timestamp. Less is // required by the sort.Interface interface. func (slice byTimestamp) Less(i, j int) bool { iTimestamp, jTimestamp := int64(0), int64(0) if slice != nil && slice[i].inputLogEvent.Timestamp != nil { iTimestamp = *slice[i].inputLogEvent.Timestamp } if slice != nil && slice[j].inputLogEvent.Timestamp != nil { jTimestamp = *slice[j].inputLogEvent.Timestamp } if iTimestamp == jTimestamp { return slice[i].insertOrder < slice[j].insertOrder } return iTimestamp < jTimestamp } // Swap swaps two values in a byTimestamp slice with each other. Swap is // required by the sort.Interface interface. func (slice byTimestamp) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) for i, input := range events { cwEvents[i] = input.inputLogEvent } return cwEvents } func newEventBatch() *eventBatch { return &eventBatch{ batch: make([]wrappedEvent, 0), bytes: 0, } } // events returns a slice of wrappedEvents sorted in order of their // timestamps and then by their insertion order (see `byTimestamp`). // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) events() []wrappedEvent { sort.Sort(byTimestamp(b.batch)) return b.batch } // add adds an event to the batch of events accounting for the // necessary overhead for an event to be logged. An error will be // returned if the event cannot be added to the batch due to service // limits. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) add(event wrappedEvent, size int) bool { addBytes := size + perEventBytes // verify we are still within service limits switch { case len(b.batch)+1 > maximumLogEventsPerPut: return false case b.bytes+addBytes > maximumBytesPerPut: return false } b.bytes += addBytes b.batch = append(b.batch, event) return true } // count is the number of batched events. Warning: this method // is not threadsafe and must not be used concurrently. func (b *eventBatch) count() int { return len(b.batch) } // size is the total number of bytes that the batch represents. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) size() int { return b.bytes } func (b *eventBatch) isEmpty() bool { zeroEvents := b.count() == 0 zeroSize := b.size() == 0 return zeroEvents && zeroSize } // reset prepares the batch for reuse. func (b *eventBatch) reset() { b.bytes = 0 b.batch = b.batch[:0] }
// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( "fmt" "os" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( name = "awslogs" regionKey = "awslogs-region" endpointKey = "awslogs-endpoint" regionEnvKey = "AWS_REGION" logGroupKey = "awslogs-group" logStreamKey = "awslogs-stream" logCreateGroupKey = "awslogs-create-group" logCreateStreamKey = "awslogs-create-stream" tagKey = "tag" datetimeFormatKey = "awslogs-datetime-format" multilinePatternKey = "awslogs-multiline-pattern" credentialsEndpointKey = "awslogs-credentials-endpoint" forceFlushIntervalKey = "awslogs-force-flush-interval-seconds" maxBufferedEventsKey = "awslogs-max-buffered-events" logFormatKey = "awslogs-format" defaultForceFlushInterval = 5 * time.Second defaultMaxBufferedEvents = 4096 // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 26 maximumBytesPerPut = 1048576 maximumLogEventsPerPut = 10000 // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html // Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the // Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To compensate for that and to avoid // splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. 
maximumBytesPerEvent = 262144 - perEventBytes resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" resourceNotFoundCode = "ResourceNotFoundException" credentialsEndpoint = "http://169.254.170.2" userAgentHeader = "User-Agent" // See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html logsFormatHeader = "x-amzn-logs-format" jsonEmfLogFormat = "json/emf" ) type logStream struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration multilinePattern *regexp.Regexp client api messages chan *logger.Message lock sync.RWMutex closed bool sequenceToken *string } type logStreamConfig struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration maxBufferedEvents int multilinePattern *regexp.Regexp } var _ logger.SizedLogger = &logStream{} type api interface { CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } type regionFinder interface { Region() (string, error) } type wrappedEvent struct { inputLogEvent *cloudwatchlogs.InputLogEvent insertOrder int } type byTimestamp []wrappedEvent // init registers the awslogs driver func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // eventBatch holds the events that are batched for submission and the // associated data about it. // // Warning: this type is not threadsafe and must not be used // concurrently. This type is expected to be consumed in a single go // routine and never concurrently. type eventBatch struct { batch []wrappedEvent bytes int } // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, // awslogs-endpoint, awslogs-group, awslogs-stream, awslogs-create-group, // awslogs-multiline-pattern and awslogs-datetime-format. // When available, configuration is also taken from environment variables // AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, the shared credentials // file (~/.aws/credentials), and the EC2 Instance Metadata Service. 
func New(info logger.Info) (logger.Logger, error) { containerStreamConfig, err := newStreamConfig(info) if err != nil { return nil, err } client, err := newAWSLogsClient(info) if err != nil { return nil, err } containerStream := &logStream{ logStreamName: containerStreamConfig.logStreamName, logGroupName: containerStreamConfig.logGroupName, logCreateGroup: containerStreamConfig.logCreateGroup, logCreateStream: containerStreamConfig.logCreateStream, logNonBlocking: containerStreamConfig.logNonBlocking, forceFlushInterval: containerStreamConfig.forceFlushInterval, multilinePattern: containerStreamConfig.multilinePattern, client: client, messages: make(chan *logger.Message, containerStreamConfig.maxBufferedEvents), } creationDone := make(chan bool) if containerStream.logNonBlocking { go func() { backoff := 1 maxBackoff := 32 for { // If logger is closed we are done containerStream.lock.RLock() if containerStream.closed { containerStream.lock.RUnlock() break } containerStream.lock.RUnlock() err := containerStream.create() if err == nil { break } time.Sleep(time.Duration(backoff) * time.Second) if backoff < maxBackoff { backoff *= 2 } logrus. WithError(err). WithField("container-id", info.ContainerID). WithField("container-name", info.ContainerName). Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") } close(creationDone) }() } else { if err = containerStream.create(); err != nil { return nil, err } close(creationDone) } go containerStream.collectBatch(creationDone) return containerStream, nil } // Parses most of the awslogs- options and prepares a config object to be used for newing the actual stream // It has been formed out to ease Utest of the New above func newStreamConfig(info logger.Info) (*logStreamConfig, error) { logGroupName := info.Config[logGroupKey] logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") if err != nil { return nil, err } logCreateGroup := false if info.Config[logCreateGroupKey] != "" { logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) if err != nil { return nil, err } } logNonBlocking := info.Config["mode"] == "non-blocking" forceFlushInterval := defaultForceFlushInterval if info.Config[forceFlushIntervalKey] != "" { forceFlushIntervalAsInt, err := strconv.Atoi(info.Config[forceFlushIntervalKey]) if err != nil { return nil, err } forceFlushInterval = time.Duration(forceFlushIntervalAsInt) * time.Second } maxBufferedEvents := int(defaultMaxBufferedEvents) if info.Config[maxBufferedEventsKey] != "" { maxBufferedEvents, err = strconv.Atoi(info.Config[maxBufferedEventsKey]) if err != nil { return nil, err } } if info.Config[logStreamKey] != "" { logStreamName = info.Config[logStreamKey] } logCreateStream := true if info.Config[logCreateStreamKey] != "" { logCreateStream, err = strconv.ParseBool(info.Config[logCreateStreamKey]) if err != nil { return nil, err } } multilinePattern, err := parseMultilineOptions(info) if err != nil { return nil, err } containerStreamConfig := &logStreamConfig{ logStreamName: logStreamName, logGroupName: logGroupName, logCreateGroup: logCreateGroup, logCreateStream: logCreateStream, logNonBlocking: logNonBlocking, forceFlushInterval: forceFlushInterval, maxBufferedEvents: maxBufferedEvents, multilinePattern: multilinePattern, } return containerStreamConfig, nil } // Parses awslogs-multiline-pattern and awslogs-datetime-format options // If awslogs-datetime-format is present, convert the format from strftime // to regexp and return. 
// If awslogs-multiline-pattern is present, compile regexp and return func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { dateTimeFormat := info.Config[datetimeFormatKey] multilinePatternKey := info.Config[multilinePatternKey] // strftime input is parsed into a regular expression if dateTimeFormat != "" { // %. matches each strftime format sequence and ReplaceAllStringFunc // looks up each format sequence in the conversion table strftimeToRegex // to replace with a defined regular expression r := regexp.MustCompile("%.") multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { return strftimeToRegex[s] }) } if multilinePatternKey != "" { multilinePattern, err := regexp.Compile(multilinePatternKey) if err != nil { return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) } return multilinePattern, nil } return nil, nil } // Maps strftime format strings to regex var strftimeToRegex = map[string]string{ /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, /*weekdayZeroIndex */ `%w`: `[0-6]`, /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, /*yearCentury */ `%Y`: `\d{4}`, /*yearZeroPadded */ `%y`: `\d{2}`, /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, /*AM or PM */ `%p`: "[A,P]M", /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, /*secondZeroPadded */ `%S`: `[0-5][0-9]`, /*microsecondZeroPadded */ `%f`: `\d{6}`, /*utcOffset */ `%z`: `[+-]\d{4}`, /*tzName */ `%Z`: `[A-Z]{1,4}T`, /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, /*milliseconds */ `%L`: `\.\d{3}`, } // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() (regionFinder, error) { s, err := session.NewSession() if err != nil { return nil, err } return ec2metadata.New(s), nil } // newSDKEndpoint is a variable such that the implementation // can be swapped out for unit tests. var newSDKEndpoint = credentialsEndpoint // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(info logger.Info) (api, error) { var region, endpoint *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) } if info.Config[regionKey] != "" { region = aws.String(info.Config[regionKey]) } if info.Config[endpointKey] != "" { endpoint = aws.String(info.Config[endpointKey]) } if region == nil || *region == "" { logrus.Info("Trying to get region from EC2 Metadata") ec2MetadataClient, err := newRegionFinder() if err != nil { logrus.WithError(err).Error("could not create EC2 metadata client") return nil, errors.Wrap(err, "could not create EC2 metadata client") } r, err := ec2MetadataClient.Region() if err != nil { logrus.WithError(err).Error("Could not get region from EC2 metadata, environment, or log option") return nil, errors.New("Cannot determine region for awslogs driver") } region = &r } sess, err := session.NewSession() if err != nil { return nil, errors.New("Failed to create a service client session for awslogs driver") } // attach region to cloudwatchlogs config sess.Config.Region = region // attach endpoint to cloudwatchlogs config if endpoint != nil { sess.Config.Endpoint = endpoint } if uri, ok := info.Config[credentialsEndpointKey]; ok { logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }) // attach credentials to cloudwatchlogs config sess.Config.Credentials = creds } logrus.WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") client := cloudwatchlogs.New(sess) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", Fn: func(r *request.Request) { currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) r.HTTPRequest.Header.Set(userAgentHeader, fmt.Sprintf("Docker %s (%s) %s", dockerversion.Version, runtime.GOOS, currentAgent)) }, }) if info.Config[logFormatKey] != "" { client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "LogFormatHeaderHandler", Fn: func(req *request.Request) { req.HTTPRequest.Header.Set(logsFormatHeader, info.Config[logFormatKey]) }, }) } return client, nil } // Name returns the name of the awslogs logging driver func (l *logStream) Name() string { return name } // BufSize returns the maximum bytes CloudWatch can handle. 
func (l *logStream) BufSize() int { return maximumBytesPerEvent } // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() if l.closed { return errors.New("awslogs is closed") } if l.logNonBlocking { select { case l.messages <- msg: return nil default: return errors.New("awslogs buffer is full") } } l.messages <- msg return nil } // Close closes the instance of the awslogs logging driver func (l *logStream) Close() error { l.lock.Lock() defer l.lock.Unlock() if !l.closed { close(l.messages) } l.closed = true return nil } // create creates log group and log stream for the instance of the awslogs logging driver func (l *logStream) create() error { err := l.createLogStream() if err == nil { return nil } if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode && l.logCreateGroup { if err := l.createLogGroup(); err != nil { return errors.Wrap(err, "failed to create Cloudwatch log group") } err = l.createLogStream() if err == nil { return nil } } return errors.Wrap(err, "failed to create Cloudwatch log stream") } // createLogGroup creates a log group for the instance of the awslogs logging driver func (l *logStream) createLogGroup() error { if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ LogGroupName: aws.String(l.logGroupName), }); err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logCreateGroup": l.logCreateGroup, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log group already exists") return nil } logrus.WithFields(fields).Error("Failed to create log group") } return err } return nil } // createLogStream creates a log stream for the instance of the awslogs logging driver func (l *logStream) createLogStream() error { // Directly return if we do not want to create log stream. if !l.logCreateStream { logrus.WithFields(logrus.Fields{ "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, "logCreateStream": l.logCreateStream, }).Info("Skipping creating log stream") return nil } input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } _, err := l.client.CreateLogStream(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log stream already exists") return nil } logrus.WithFields(fields).Error("Failed to create log stream") } } return err } // newTicker is used for time-based batching. newTicker is a variable such // that the implementation can be swapped out for unit tests. var newTicker = func(freq time.Duration) *time.Ticker { return time.NewTicker(freq) } // collectBatch executes as a goroutine to perform batching of log events for // submission to the log stream. 
If the awslogs-multiline-pattern or // awslogs-datetime-format options have been configured, multiline processing // is enabled, where log messages are stored in an event buffer until a multiline // pattern match is found, at which point the messages in the event buffer are // pushed to CloudWatch logs as a single log event. Multiline messages are processed // according to the maximumBytesPerPut constraint, and the implementation only // allows for messages to be buffered for a maximum of 2*batchPublishFrequency // seconds. When events are ready to be processed for submission to CloudWatch // Logs, the processEvents method is called. If a multiline pattern is not // configured, log events are submitted to the processEvents method immediately. func (l *logStream) collectBatch(created chan bool) { // Wait for the logstream/group to be created <-created flushInterval := l.forceFlushInterval if flushInterval <= 0 { flushInterval = defaultForceFlushInterval } ticker := newTicker(flushInterval) var eventBuffer []byte var eventBufferTimestamp int64 var batch = newEventBatch() for { select { case t := <-ticker.C: // If event buffer is older than batch publish frequency flush the event buffer if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp eventBufferExpired := eventBufferAge >= int64(flushInterval)/int64(time.Millisecond) eventBufferNegative := eventBufferAge < 0 if eventBufferExpired || eventBufferNegative { l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBuffer = eventBuffer[:0] } } l.publishBatch(batch) batch.reset() case msg, more := <-l.messages: if !more { // Flush event buffer and release resources l.processEvent(batch, eventBuffer, eventBufferTimestamp) l.publishBatch(batch) batch.reset() return } if eventBufferTimestamp == 0 { eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) } line := msg.Line if l.multilinePattern != nil { lineEffectiveLen := effectiveLen(string(line)) if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent { // This is a new log event or we will exceed max bytes per event // so flush the current eventBuffer to events and reset timestamp l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) eventBuffer = eventBuffer[:0] } // Append newline if event is less than max event size if lineEffectiveLen < maximumBytesPerEvent { line = append(line, "\n"...) } eventBuffer = append(eventBuffer, line...) logger.PutMessage(msg) } else { l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) logger.PutMessage(msg) } } } } // processEvent processes log events that are ready for submission to CloudWatch // logs. Batching is performed on time- and size-bases. Time-based batching // occurs at a 5 second interval (defined in the batchPublishFrequency const). // Size-based batching is performed on the maximum number of events per batch // (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a // batch (defined in maximumBytesPerPut). Log messages are split by the maximum // bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event // byte overhead (defined in perEventBytes) which is accounted for in split- and // batch-calculations. 
Because the events are interpreted as UTF-8 encoded // Unicode, invalid UTF-8 byte sequences are replaced with the Unicode // replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To // compensate for that and to avoid splitting valid UTF-8 characters into // invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) { for len(bytes) > 0 { // Split line length so it does not exceed the maximum splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent) line := bytes[:splitOffset] event := wrappedEvent{ inputLogEvent: &cloudwatchlogs.InputLogEvent{ Message: aws.String(string(line)), Timestamp: aws.Int64(timestamp), }, insertOrder: batch.count(), } added := batch.add(event, lineBytes) if added { bytes = bytes[splitOffset:] } else { l.publishBatch(batch) batch.reset() } } } // effectiveLen counts the effective number of bytes in the string, after // UTF-8 normalization. UTF-8 normalization includes replacing bytes that do // not constitute valid UTF-8 encoded Unicode codepoints with the Unicode // replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as // utf8.RuneError) func effectiveLen(line string) int { effectiveBytes := 0 for _, rune := range line { effectiveBytes += utf8.RuneLen(rune) } return effectiveBytes } // findValidSplit finds the byte offset to split a string without breaking valid // Unicode codepoints given a maximum number of total bytes. findValidSplit // returns the byte offset for splitting a string or []byte, as well as the // effective number of bytes if the string were normalized to replace invalid // UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8 // sequence, represented in Go as utf8.RuneError) func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) { for offset, rune := range line { splitOffset = offset if effectiveBytes+utf8.RuneLen(rune) > maxBytes { return splitOffset, effectiveBytes } effectiveBytes += utf8.RuneLen(rune) } splitOffset = len(line) return } // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). 
func (l *logStream) publishBatch(batch *eventBatch) { if batch.isEmpty() { return } cwEvents := unwrapEvents(batch.events()) nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dataAlreadyAcceptedCode { // already submitted, just grab the correct sequence token parts := strings.Split(awsErr.Message(), " ") nextSequenceToken = &parts[len(parts)-1] logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Info("Data already accepted, ignoring error") err = nil } else if awsErr.Code() == invalidSequenceTokenCode { // sequence code is bad, grab the correct one and retry parts := strings.Split(awsErr.Message(), " ") token := parts[len(parts)-1] nextSequenceToken, err = l.putLogEvents(cwEvents, &token) } } } if err != nil { logrus.Error(err) } else { l.sequenceToken = nextSequenceToken } } // putLogEvents wraps the PutLogEvents API func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: events, SequenceToken: sequenceToken, LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } resp, err := l.client.PutLogEvents(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Error("Failed to put log events") } return nil, err } return resp.NextSequenceToken, nil } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, awslogs-endpoint // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, // awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: case logCreateGroupKey: case regionKey: case endpointKey: case tagKey: case datetimeFormatKey: case multilinePatternKey: case credentialsEndpointKey: case forceFlushIntervalKey: case maxBufferedEventsKey: case logFormatKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } } if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } if cfg[logCreateGroupKey] != "" { if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) } } if cfg[forceFlushIntervalKey] != "" { if value, err := strconv.Atoi(cfg[forceFlushIntervalKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", forceFlushIntervalKey, cfg[forceFlushIntervalKey]) } } if cfg[maxBufferedEventsKey] != "" { if value, err := strconv.Atoi(cfg[maxBufferedEventsKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", maxBufferedEventsKey, cfg[maxBufferedEventsKey]) } } _, datetimeFormatKeyExists := cfg[datetimeFormatKey] _, multilinePatternKeyExists := cfg[multilinePatternKey] if datetimeFormatKeyExists && multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) } if cfg[logFormatKey] != "" { // For now, only the "json/emf" log format is supported if cfg[logFormatKey] != 
jsonEmfLogFormat { return fmt.Errorf("unsupported log format '%s'", cfg[logFormatKey]) } if datetimeFormatKeyExists || multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' or '%s' when log opt '%s' is set to '%s'", datetimeFormatKey, multilinePatternKey, logFormatKey, jsonEmfLogFormat) } } return nil } // Len returns the length of a byTimestamp slice. Len is required by the // sort.Interface interface. func (slice byTimestamp) Len() int { return len(slice) } // Less compares two values in a byTimestamp slice by Timestamp. Less is // required by the sort.Interface interface. func (slice byTimestamp) Less(i, j int) bool { iTimestamp, jTimestamp := int64(0), int64(0) if slice != nil && slice[i].inputLogEvent.Timestamp != nil { iTimestamp = *slice[i].inputLogEvent.Timestamp } if slice != nil && slice[j].inputLogEvent.Timestamp != nil { jTimestamp = *slice[j].inputLogEvent.Timestamp } if iTimestamp == jTimestamp { return slice[i].insertOrder < slice[j].insertOrder } return iTimestamp < jTimestamp } // Swap swaps two values in a byTimestamp slice with each other. Swap is // required by the sort.Interface interface. func (slice byTimestamp) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) for i, input := range events { cwEvents[i] = input.inputLogEvent } return cwEvents } func newEventBatch() *eventBatch { return &eventBatch{ batch: make([]wrappedEvent, 0), bytes: 0, } } // events returns a slice of wrappedEvents sorted in order of their // timestamps and then by their insertion order (see `byTimestamp`). // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) events() []wrappedEvent { sort.Sort(byTimestamp(b.batch)) return b.batch } // add adds an event to the batch of events accounting for the // necessary overhead for an event to be logged. An error will be // returned if the event cannot be added to the batch due to service // limits. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) add(event wrappedEvent, size int) bool { addBytes := size + perEventBytes // verify we are still within service limits switch { case len(b.batch)+1 > maximumLogEventsPerPut: return false case b.bytes+addBytes > maximumBytesPerPut: return false } b.bytes += addBytes b.batch = append(b.batch, event) return true } // count is the number of batched events. Warning: this method // is not threadsafe and must not be used concurrently. func (b *eventBatch) count() int { return len(b.batch) } // size is the total number of bytes that the batch represents. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) size() int { return b.bytes } func (b *eventBatch) isEmpty() bool { zeroEvents := b.count() == 0 zeroSize := b.size() == 0 return zeroEvents && zeroSize } // reset prepares the batch for reuse. func (b *eventBatch) reset() { b.bytes = 0 b.batch = b.batch[:0] }
sanjams2
b46ab1f57997feed98aa2cccd8e4f5a612b963bc
787b8fe14f34f04f723bd0913f293a59be428fe6
Yeah, that makes sense. I actually had it as `awslogs-format` at first but then changed it, and now I can't quite remember why. I think my thinking was that `awslogs-` was the common parameter prefix and that the parameter name was actually `log-format`. Maybe because there could be another type of format, just plain `format` with the prefix felt slightly generic. But I think it's probably best to just use `awslogs-format` as you suggest.
sanjams2
4,450
moby/moby
42,838
Add an option to specify log format for awslogs driver
Added an option 'awslogs-format' to allow specifying a log format for the logs sent to CloudWatch from the awslogs log driver. For now, only the 'json/emf' format is supported. If no option is provided, the log format header in the request to CloudWatch will be omitted as before. Signed-off-by: James Sanders <[email protected]> **- What I did** - Added an option `awslogs-format` to allow specifying a log format for the logs sent to CloudWatch from the awslogs log driver. - For now, only the `json/emf` format is supported. - If no option is provided, the log format header in the request to CloudWatch will be omitted as before. - This addresses https://github.com/moby/moby/issues/42731 - Users cannot provide the `awslogs-datetime-format` or `awslogs-multiline-pattern` options when the `awslogs-format` option is set to `json/emf`. This is because both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options modify the log line delimiter, and since a `json/emf` log event must be a valid JSON line, it does not make sense to allow different line delimiters when using `json/emf`. **- How I did it** - Modified the `awslogs` log driver to add a new CloudWatch client "handler" (basically middleware) that adds the `x-amzn-logs-format` header to the request when the log format option is given and is `json/emf` (a minimal sketch of this handler pattern follows the description below). - Tried to follow existing conventions every step of the way. - Based the key name on the one used in the [fluentd plugin](https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatch) **- How to verify it** - Unit Testing - Manual integration testing: - Ran a container with no format option provided and verified logs were still published (no regression) - Ran a container in a development environment with `--log-opt awslogs-format=json/emf` enabled and saw metrics posted to CloudWatch: ```bash docker run --rm \ --log-driver awslogs \ --log-opt awslogs-region=us-west-2 \ --log-opt awslogs-group=emf-logger-test-container \ --log-opt awslogs-create-group=true \ --log-opt awslogs-format=json/emf \ emf-logger \ run ``` - Metrics in CloudWatch: <img width="1091" alt="image" src="https://user-images.githubusercontent.com/10730172/132904614-a4c05775-a495-449c-9468-308edf9a0aa6.png"> - Attempted to specify a "random" log format and saw that an error was thrown ```bash # docker run --rm \ --log-driver awslogs \ --log-opt awslogs-region=us-west-2 \ --log-opt awslogs-group=emf-logger-test-container \ --log-opt awslogs-create-group=true \ --log-opt awslogs-format=random \ emf-logger \ run docker: Error response from daemon: unsupported log format 'random'. ``` - Attempted to set `awslogs-datetime-format` and saw that an error was thrown ```bash docker run --rm \ --log-driver awslogs \ --log-opt awslogs-region=us-west-2 \ --log-opt awslogs-group=emf-logger-test-container \ --log-opt awslogs-create-group=true \ --log-opt awslogs-format='json/emf' \ --log-opt awslogs-datetime-format='YYYY-mm-dd' \ emf-logger \ serve docker: Error response from daemon: you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'. 
``` - Attempted to set `awslogs-multiline-pattern` and saw that an error was thrown ```bash docker run --rm \ --log-driver awslogs \ --log-opt awslogs-region=us-west-2 \ --log-opt awslogs-group=emf-logger-test-container \ --log-opt awslogs-create-group=true \ --log-opt awslogs-format='json/emf' \ --log-opt awslogs-multiline-pattern='-------' \ emf-logger \ serve docker: Error response from daemon: you cannot configure log opt 'awslogs-datetime-format' or 'awslogs-multiline-pattern' when log opt 'awslogs-format' is set to 'json/emf'. ``` **- Description for the changelog** Added a new option to the `awslogs` log driver to specify the log format that is sent to CloudWatch. **- A picture of a cute animal (not mandatory but encouraged)** ![image](https://user-images.githubusercontent.com/10730172/132908691-81dd1525-5133-4ac2-938e-b1e7d84e30c9.png)
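To make the "handler" approach described above concrete, here is a minimal, self-contained Go sketch (not the PR's exact code) of how a named build handler on an aws-sdk-go CloudWatch Logs client can attach the `x-amzn-logs-format` header; `newClientWithLogFormat` and its `logFormat` parameter are hypothetical names used only for illustration.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// newClientWithLogFormat builds a CloudWatch Logs client and, when a log
// format is supplied, registers a build handler that sets the
// x-amzn-logs-format header on every outgoing request.
func newClientWithLogFormat(logFormat string) (*cloudwatchlogs.CloudWatchLogs, error) {
	sess, err := session.NewSession()
	if err != nil {
		return nil, err
	}
	client := cloudwatchlogs.New(sess)
	if logFormat != "" {
		client.Handlers.Build.PushBackNamed(request.NamedHandler{
			Name: "LogFormatHeaderHandler",
			Fn: func(req *request.Request) {
				// Build handlers run while the HTTP request is being
				// constructed, so the header is present on every call,
				// including PutLogEvents.
				req.HTTPRequest.Header.Set("x-amzn-logs-format", logFormat)
			},
		})
	}
	return client, nil
}

func main() {
	client, err := newClientWithLogFormat("json/emf")
	if err != nil {
		panic(err)
	}
	fmt.Println("CloudWatch Logs client configured:", client != nil)
}
```

Registering the header as a named handler mirrors how the driver already injects its Docker-specific User-Agent, so the format header rides along on the same client rather than requiring a separate code path.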
null
2021-09-10 20:09:50+00:00
2021-12-02 19:48:07+00:00
daemon/logger/awslogs/cloudwatchlogs.go
// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( "fmt" "os" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( name = "awslogs" regionKey = "awslogs-region" endpointKey = "awslogs-endpoint" regionEnvKey = "AWS_REGION" logGroupKey = "awslogs-group" logStreamKey = "awslogs-stream" logCreateGroupKey = "awslogs-create-group" logCreateStreamKey = "awslogs-create-stream" tagKey = "tag" datetimeFormatKey = "awslogs-datetime-format" multilinePatternKey = "awslogs-multiline-pattern" credentialsEndpointKey = "awslogs-credentials-endpoint" forceFlushIntervalKey = "awslogs-force-flush-interval-seconds" maxBufferedEventsKey = "awslogs-max-buffered-events" defaultForceFlushInterval = 5 * time.Second defaultMaxBufferedEvents = 4096 // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 26 maximumBytesPerPut = 1048576 maximumLogEventsPerPut = 10000 // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html // Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the // Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To compensate for that and to avoid // splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. 
maximumBytesPerEvent = 262144 - perEventBytes resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" resourceNotFoundCode = "ResourceNotFoundException" credentialsEndpoint = "http://169.254.170.2" userAgentHeader = "User-Agent" ) type logStream struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration multilinePattern *regexp.Regexp client api messages chan *logger.Message lock sync.RWMutex closed bool sequenceToken *string } type logStreamConfig struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration maxBufferedEvents int multilinePattern *regexp.Regexp } var _ logger.SizedLogger = &logStream{} type api interface { CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } type regionFinder interface { Region() (string, error) } type wrappedEvent struct { inputLogEvent *cloudwatchlogs.InputLogEvent insertOrder int } type byTimestamp []wrappedEvent // init registers the awslogs driver func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // eventBatch holds the events that are batched for submission and the // associated data about it. // // Warning: this type is not threadsafe and must not be used // concurrently. This type is expected to be consumed in a single go // routine and never concurrently. type eventBatch struct { batch []wrappedEvent bytes int } // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, // awslogs-endpoint, awslogs-group, awslogs-stream, awslogs-create-group, // awslogs-multiline-pattern and awslogs-datetime-format. // When available, configuration is also taken from environment variables // AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, the shared credentials // file (~/.aws/credentials), and the EC2 Instance Metadata Service. 
func New(info logger.Info) (logger.Logger, error) { containerStreamConfig, err := newStreamConfig(info) if err != nil { return nil, err } client, err := newAWSLogsClient(info) if err != nil { return nil, err } containerStream := &logStream{ logStreamName: containerStreamConfig.logStreamName, logGroupName: containerStreamConfig.logGroupName, logCreateGroup: containerStreamConfig.logCreateGroup, logCreateStream: containerStreamConfig.logCreateStream, logNonBlocking: containerStreamConfig.logNonBlocking, forceFlushInterval: containerStreamConfig.forceFlushInterval, multilinePattern: containerStreamConfig.multilinePattern, client: client, messages: make(chan *logger.Message, containerStreamConfig.maxBufferedEvents), } creationDone := make(chan bool) if containerStream.logNonBlocking { go func() { backoff := 1 maxBackoff := 32 for { // If logger is closed we are done containerStream.lock.RLock() if containerStream.closed { containerStream.lock.RUnlock() break } containerStream.lock.RUnlock() err := containerStream.create() if err == nil { break } time.Sleep(time.Duration(backoff) * time.Second) if backoff < maxBackoff { backoff *= 2 } logrus. WithError(err). WithField("container-id", info.ContainerID). WithField("container-name", info.ContainerName). Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") } close(creationDone) }() } else { if err = containerStream.create(); err != nil { return nil, err } close(creationDone) } go containerStream.collectBatch(creationDone) return containerStream, nil } // Parses most of the awslogs- options and prepares a config object to be used for newing the actual stream // It has been formed out to ease Utest of the New above func newStreamConfig(info logger.Info) (*logStreamConfig, error) { logGroupName := info.Config[logGroupKey] logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") if err != nil { return nil, err } logCreateGroup := false if info.Config[logCreateGroupKey] != "" { logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) if err != nil { return nil, err } } logNonBlocking := info.Config["mode"] == "non-blocking" forceFlushInterval := defaultForceFlushInterval if info.Config[forceFlushIntervalKey] != "" { forceFlushIntervalAsInt, err := strconv.Atoi(info.Config[forceFlushIntervalKey]) if err != nil { return nil, err } forceFlushInterval = time.Duration(forceFlushIntervalAsInt) * time.Second } maxBufferedEvents := int(defaultMaxBufferedEvents) if info.Config[maxBufferedEventsKey] != "" { maxBufferedEvents, err = strconv.Atoi(info.Config[maxBufferedEventsKey]) if err != nil { return nil, err } } if info.Config[logStreamKey] != "" { logStreamName = info.Config[logStreamKey] } logCreateStream := true if info.Config[logCreateStreamKey] != "" { logCreateStream, err = strconv.ParseBool(info.Config[logCreateStreamKey]) if err != nil { return nil, err } } multilinePattern, err := parseMultilineOptions(info) if err != nil { return nil, err } containerStreamConfig := &logStreamConfig{ logStreamName: logStreamName, logGroupName: logGroupName, logCreateGroup: logCreateGroup, logCreateStream: logCreateStream, logNonBlocking: logNonBlocking, forceFlushInterval: forceFlushInterval, maxBufferedEvents: maxBufferedEvents, multilinePattern: multilinePattern, } return containerStreamConfig, nil } // Parses awslogs-multiline-pattern and awslogs-datetime-format options // If awslogs-datetime-format is present, convert the format from strftime // to regexp and return. 
// If awslogs-multiline-pattern is present, compile regexp and return func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { dateTimeFormat := info.Config[datetimeFormatKey] multilinePatternKey := info.Config[multilinePatternKey] // strftime input is parsed into a regular expression if dateTimeFormat != "" { // %. matches each strftime format sequence and ReplaceAllStringFunc // looks up each format sequence in the conversion table strftimeToRegex // to replace with a defined regular expression r := regexp.MustCompile("%.") multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { return strftimeToRegex[s] }) } if multilinePatternKey != "" { multilinePattern, err := regexp.Compile(multilinePatternKey) if err != nil { return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) } return multilinePattern, nil } return nil, nil } // Maps strftime format strings to regex var strftimeToRegex = map[string]string{ /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, /*weekdayZeroIndex */ `%w`: `[0-6]`, /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, /*yearCentury */ `%Y`: `\d{4}`, /*yearZeroPadded */ `%y`: `\d{2}`, /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, /*AM or PM */ `%p`: "[A,P]M", /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, /*secondZeroPadded */ `%S`: `[0-5][0-9]`, /*microsecondZeroPadded */ `%f`: `\d{6}`, /*utcOffset */ `%z`: `[+-]\d{4}`, /*tzName */ `%Z`: `[A-Z]{1,4}T`, /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, /*milliseconds */ `%L`: `\.\d{3}`, } // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() (regionFinder, error) { s, err := session.NewSession() if err != nil { return nil, err } return ec2metadata.New(s), nil } // newSDKEndpoint is a variable such that the implementation // can be swapped out for unit tests. var newSDKEndpoint = credentialsEndpoint // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(info logger.Info) (api, error) { var region, endpoint *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) } if info.Config[regionKey] != "" { region = aws.String(info.Config[regionKey]) } if info.Config[endpointKey] != "" { endpoint = aws.String(info.Config[endpointKey]) } if region == nil || *region == "" { logrus.Info("Trying to get region from EC2 Metadata") ec2MetadataClient, err := newRegionFinder() if err != nil { logrus.WithError(err).Error("could not create EC2 metadata client") return nil, errors.Wrap(err, "could not create EC2 metadata client") } r, err := ec2MetadataClient.Region() if err != nil { logrus.WithError(err).Error("Could not get region from EC2 metadata, environment, or log option") return nil, errors.New("Cannot determine region for awslogs driver") } region = &r } sess, err := session.NewSession() if err != nil { return nil, errors.New("Failed to create a service client session for awslogs driver") } // attach region to cloudwatchlogs config sess.Config.Region = region // attach endpoint to cloudwatchlogs config if endpoint != nil { sess.Config.Endpoint = endpoint } if uri, ok := info.Config[credentialsEndpointKey]; ok { logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }) // attach credentials to cloudwatchlogs config sess.Config.Credentials = creds } logrus.WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") client := cloudwatchlogs.New(sess) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", Fn: func(r *request.Request) { currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) r.HTTPRequest.Header.Set(userAgentHeader, fmt.Sprintf("Docker %s (%s) %s", dockerversion.Version, runtime.GOOS, currentAgent)) }, }) return client, nil } // Name returns the name of the awslogs logging driver func (l *logStream) Name() string { return name } // BufSize returns the maximum bytes CloudWatch can handle. 
func (l *logStream) BufSize() int { return maximumBytesPerEvent } // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() if l.closed { return errors.New("awslogs is closed") } if l.logNonBlocking { select { case l.messages <- msg: return nil default: return errors.New("awslogs buffer is full") } } l.messages <- msg return nil } // Close closes the instance of the awslogs logging driver func (l *logStream) Close() error { l.lock.Lock() defer l.lock.Unlock() if !l.closed { close(l.messages) } l.closed = true return nil } // create creates log group and log stream for the instance of the awslogs logging driver func (l *logStream) create() error { err := l.createLogStream() if err == nil { return nil } if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode && l.logCreateGroup { if err := l.createLogGroup(); err != nil { return errors.Wrap(err, "failed to create Cloudwatch log group") } err = l.createLogStream() if err == nil { return nil } } return errors.Wrap(err, "failed to create Cloudwatch log stream") } // createLogGroup creates a log group for the instance of the awslogs logging driver func (l *logStream) createLogGroup() error { if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ LogGroupName: aws.String(l.logGroupName), }); err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logCreateGroup": l.logCreateGroup, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log group already exists") return nil } logrus.WithFields(fields).Error("Failed to create log group") } return err } return nil } // createLogStream creates a log stream for the instance of the awslogs logging driver func (l *logStream) createLogStream() error { // Directly return if we do not want to create log stream. if !l.logCreateStream { logrus.WithFields(logrus.Fields{ "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, "logCreateStream": l.logCreateStream, }).Info("Skipping creating log stream") return nil } input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } _, err := l.client.CreateLogStream(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log stream already exists") return nil } logrus.WithFields(fields).Error("Failed to create log stream") } } return err } // newTicker is used for time-based batching. newTicker is a variable such // that the implementation can be swapped out for unit tests. var newTicker = func(freq time.Duration) *time.Ticker { return time.NewTicker(freq) } // collectBatch executes as a goroutine to perform batching of log events for // submission to the log stream. 
If the awslogs-multiline-pattern or // awslogs-datetime-format options have been configured, multiline processing // is enabled, where log messages are stored in an event buffer until a multiline // pattern match is found, at which point the messages in the event buffer are // pushed to CloudWatch logs as a single log event. Multiline messages are processed // according to the maximumBytesPerPut constraint, and the implementation only // allows for messages to be buffered for a maximum of 2*batchPublishFrequency // seconds. When events are ready to be processed for submission to CloudWatch // Logs, the processEvents method is called. If a multiline pattern is not // configured, log events are submitted to the processEvents method immediately. func (l *logStream) collectBatch(created chan bool) { // Wait for the logstream/group to be created <-created flushInterval := l.forceFlushInterval if flushInterval <= 0 { flushInterval = defaultForceFlushInterval } ticker := newTicker(flushInterval) var eventBuffer []byte var eventBufferTimestamp int64 var batch = newEventBatch() for { select { case t := <-ticker.C: // If event buffer is older than batch publish frequency flush the event buffer if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp eventBufferExpired := eventBufferAge >= int64(flushInterval)/int64(time.Millisecond) eventBufferNegative := eventBufferAge < 0 if eventBufferExpired || eventBufferNegative { l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBuffer = eventBuffer[:0] } } l.publishBatch(batch) batch.reset() case msg, more := <-l.messages: if !more { // Flush event buffer and release resources l.processEvent(batch, eventBuffer, eventBufferTimestamp) l.publishBatch(batch) batch.reset() return } if eventBufferTimestamp == 0 { eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) } line := msg.Line if l.multilinePattern != nil { lineEffectiveLen := effectiveLen(string(line)) if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent { // This is a new log event or we will exceed max bytes per event // so flush the current eventBuffer to events and reset timestamp l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) eventBuffer = eventBuffer[:0] } // Append newline if event is less than max event size if lineEffectiveLen < maximumBytesPerEvent { line = append(line, "\n"...) } eventBuffer = append(eventBuffer, line...) logger.PutMessage(msg) } else { l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) logger.PutMessage(msg) } } } } // processEvent processes log events that are ready for submission to CloudWatch // logs. Batching is performed on time- and size-bases. Time-based batching // occurs at a 5 second interval (defined in the batchPublishFrequency const). // Size-based batching is performed on the maximum number of events per batch // (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a // batch (defined in maximumBytesPerPut). Log messages are split by the maximum // bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event // byte overhead (defined in perEventBytes) which is accounted for in split- and // batch-calculations. 
Because the events are interpreted as UTF-8 encoded // Unicode, invalid UTF-8 byte sequences are replaced with the Unicode // replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To // compensate for that and to avoid splitting valid UTF-8 characters into // invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) { for len(bytes) > 0 { // Split line length so it does not exceed the maximum splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent) line := bytes[:splitOffset] event := wrappedEvent{ inputLogEvent: &cloudwatchlogs.InputLogEvent{ Message: aws.String(string(line)), Timestamp: aws.Int64(timestamp), }, insertOrder: batch.count(), } added := batch.add(event, lineBytes) if added { bytes = bytes[splitOffset:] } else { l.publishBatch(batch) batch.reset() } } } // effectiveLen counts the effective number of bytes in the string, after // UTF-8 normalization. UTF-8 normalization includes replacing bytes that do // not constitute valid UTF-8 encoded Unicode codepoints with the Unicode // replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as // utf8.RuneError) func effectiveLen(line string) int { effectiveBytes := 0 for _, rune := range line { effectiveBytes += utf8.RuneLen(rune) } return effectiveBytes } // findValidSplit finds the byte offset to split a string without breaking valid // Unicode codepoints given a maximum number of total bytes. findValidSplit // returns the byte offset for splitting a string or []byte, as well as the // effective number of bytes if the string were normalized to replace invalid // UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8 // sequence, represented in Go as utf8.RuneError) func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) { for offset, rune := range line { splitOffset = offset if effectiveBytes+utf8.RuneLen(rune) > maxBytes { return splitOffset, effectiveBytes } effectiveBytes += utf8.RuneLen(rune) } splitOffset = len(line) return } // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). 
func (l *logStream) publishBatch(batch *eventBatch) { if batch.isEmpty() { return } cwEvents := unwrapEvents(batch.events()) nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dataAlreadyAcceptedCode { // already submitted, just grab the correct sequence token parts := strings.Split(awsErr.Message(), " ") nextSequenceToken = &parts[len(parts)-1] logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Info("Data already accepted, ignoring error") err = nil } else if awsErr.Code() == invalidSequenceTokenCode { // sequence code is bad, grab the correct one and retry parts := strings.Split(awsErr.Message(), " ") token := parts[len(parts)-1] nextSequenceToken, err = l.putLogEvents(cwEvents, &token) } } } if err != nil { logrus.Error(err) } else { l.sequenceToken = nextSequenceToken } } // putLogEvents wraps the PutLogEvents API func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: events, SequenceToken: sequenceToken, LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } resp, err := l.client.PutLogEvents(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Error("Failed to put log events") } return nil, err } return resp.NextSequenceToken, nil } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, awslogs-endpoint // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, // awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: case logCreateGroupKey: case regionKey: case endpointKey: case tagKey: case datetimeFormatKey: case multilinePatternKey: case credentialsEndpointKey: case forceFlushIntervalKey: case maxBufferedEventsKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } } if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } if cfg[logCreateGroupKey] != "" { if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) } } if cfg[forceFlushIntervalKey] != "" { if value, err := strconv.Atoi(cfg[forceFlushIntervalKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", forceFlushIntervalKey, cfg[forceFlushIntervalKey]) } } if cfg[maxBufferedEventsKey] != "" { if value, err := strconv.Atoi(cfg[maxBufferedEventsKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", maxBufferedEventsKey, cfg[maxBufferedEventsKey]) } } _, datetimeFormatKeyExists := cfg[datetimeFormatKey] _, multilinePatternKeyExists := cfg[multilinePatternKey] if datetimeFormatKeyExists && multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) } return nil } // Len returns the length of a byTimestamp slice. Len is required by the // sort.Interface interface. 
func (slice byTimestamp) Len() int { return len(slice) } // Less compares two values in a byTimestamp slice by Timestamp. Less is // required by the sort.Interface interface. func (slice byTimestamp) Less(i, j int) bool { iTimestamp, jTimestamp := int64(0), int64(0) if slice != nil && slice[i].inputLogEvent.Timestamp != nil { iTimestamp = *slice[i].inputLogEvent.Timestamp } if slice != nil && slice[j].inputLogEvent.Timestamp != nil { jTimestamp = *slice[j].inputLogEvent.Timestamp } if iTimestamp == jTimestamp { return slice[i].insertOrder < slice[j].insertOrder } return iTimestamp < jTimestamp } // Swap swaps two values in a byTimestamp slice with each other. Swap is // required by the sort.Interface interface. func (slice byTimestamp) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) for i, input := range events { cwEvents[i] = input.inputLogEvent } return cwEvents } func newEventBatch() *eventBatch { return &eventBatch{ batch: make([]wrappedEvent, 0), bytes: 0, } } // events returns a slice of wrappedEvents sorted in order of their // timestamps and then by their insertion order (see `byTimestamp`). // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) events() []wrappedEvent { sort.Sort(byTimestamp(b.batch)) return b.batch } // add adds an event to the batch of events accounting for the // necessary overhead for an event to be logged. An error will be // returned if the event cannot be added to the batch due to service // limits. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) add(event wrappedEvent, size int) bool { addBytes := size + perEventBytes // verify we are still within service limits switch { case len(b.batch)+1 > maximumLogEventsPerPut: return false case b.bytes+addBytes > maximumBytesPerPut: return false } b.bytes += addBytes b.batch = append(b.batch, event) return true } // count is the number of batched events. Warning: this method // is not threadsafe and must not be used concurrently. func (b *eventBatch) count() int { return len(b.batch) } // size is the total number of bytes that the batch represents. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) size() int { return b.bytes } func (b *eventBatch) isEmpty() bool { zeroEvents := b.count() == 0 zeroSize := b.size() == 0 return zeroEvents && zeroSize } // reset prepares the batch for reuse. func (b *eventBatch) reset() { b.bytes = 0 b.batch = b.batch[:0] }
// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( "fmt" "os" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( name = "awslogs" regionKey = "awslogs-region" endpointKey = "awslogs-endpoint" regionEnvKey = "AWS_REGION" logGroupKey = "awslogs-group" logStreamKey = "awslogs-stream" logCreateGroupKey = "awslogs-create-group" logCreateStreamKey = "awslogs-create-stream" tagKey = "tag" datetimeFormatKey = "awslogs-datetime-format" multilinePatternKey = "awslogs-multiline-pattern" credentialsEndpointKey = "awslogs-credentials-endpoint" forceFlushIntervalKey = "awslogs-force-flush-interval-seconds" maxBufferedEventsKey = "awslogs-max-buffered-events" logFormatKey = "awslogs-format" defaultForceFlushInterval = 5 * time.Second defaultMaxBufferedEvents = 4096 // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 26 maximumBytesPerPut = 1048576 maximumLogEventsPerPut = 10000 // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html // Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the // Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To compensate for that and to avoid // splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. 
maximumBytesPerEvent = 262144 - perEventBytes resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" resourceNotFoundCode = "ResourceNotFoundException" credentialsEndpoint = "http://169.254.170.2" userAgentHeader = "User-Agent" // See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html logsFormatHeader = "x-amzn-logs-format" jsonEmfLogFormat = "json/emf" ) type logStream struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration multilinePattern *regexp.Regexp client api messages chan *logger.Message lock sync.RWMutex closed bool sequenceToken *string } type logStreamConfig struct { logStreamName string logGroupName string logCreateGroup bool logCreateStream bool logNonBlocking bool forceFlushInterval time.Duration maxBufferedEvents int multilinePattern *regexp.Regexp } var _ logger.SizedLogger = &logStream{} type api interface { CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } type regionFinder interface { Region() (string, error) } type wrappedEvent struct { inputLogEvent *cloudwatchlogs.InputLogEvent insertOrder int } type byTimestamp []wrappedEvent // init registers the awslogs driver func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // eventBatch holds the events that are batched for submission and the // associated data about it. // // Warning: this type is not threadsafe and must not be used // concurrently. This type is expected to be consumed in a single go // routine and never concurrently. type eventBatch struct { batch []wrappedEvent bytes int } // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, // awslogs-endpoint, awslogs-group, awslogs-stream, awslogs-create-group, // awslogs-multiline-pattern and awslogs-datetime-format. // When available, configuration is also taken from environment variables // AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, the shared credentials // file (~/.aws/credentials), and the EC2 Instance Metadata Service. 
func New(info logger.Info) (logger.Logger, error) { containerStreamConfig, err := newStreamConfig(info) if err != nil { return nil, err } client, err := newAWSLogsClient(info) if err != nil { return nil, err } containerStream := &logStream{ logStreamName: containerStreamConfig.logStreamName, logGroupName: containerStreamConfig.logGroupName, logCreateGroup: containerStreamConfig.logCreateGroup, logCreateStream: containerStreamConfig.logCreateStream, logNonBlocking: containerStreamConfig.logNonBlocking, forceFlushInterval: containerStreamConfig.forceFlushInterval, multilinePattern: containerStreamConfig.multilinePattern, client: client, messages: make(chan *logger.Message, containerStreamConfig.maxBufferedEvents), } creationDone := make(chan bool) if containerStream.logNonBlocking { go func() { backoff := 1 maxBackoff := 32 for { // If logger is closed we are done containerStream.lock.RLock() if containerStream.closed { containerStream.lock.RUnlock() break } containerStream.lock.RUnlock() err := containerStream.create() if err == nil { break } time.Sleep(time.Duration(backoff) * time.Second) if backoff < maxBackoff { backoff *= 2 } logrus. WithError(err). WithField("container-id", info.ContainerID). WithField("container-name", info.ContainerName). Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") } close(creationDone) }() } else { if err = containerStream.create(); err != nil { return nil, err } close(creationDone) } go containerStream.collectBatch(creationDone) return containerStream, nil } // Parses most of the awslogs- options and prepares a config object to be used for newing the actual stream // It has been formed out to ease Utest of the New above func newStreamConfig(info logger.Info) (*logStreamConfig, error) { logGroupName := info.Config[logGroupKey] logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") if err != nil { return nil, err } logCreateGroup := false if info.Config[logCreateGroupKey] != "" { logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) if err != nil { return nil, err } } logNonBlocking := info.Config["mode"] == "non-blocking" forceFlushInterval := defaultForceFlushInterval if info.Config[forceFlushIntervalKey] != "" { forceFlushIntervalAsInt, err := strconv.Atoi(info.Config[forceFlushIntervalKey]) if err != nil { return nil, err } forceFlushInterval = time.Duration(forceFlushIntervalAsInt) * time.Second } maxBufferedEvents := int(defaultMaxBufferedEvents) if info.Config[maxBufferedEventsKey] != "" { maxBufferedEvents, err = strconv.Atoi(info.Config[maxBufferedEventsKey]) if err != nil { return nil, err } } if info.Config[logStreamKey] != "" { logStreamName = info.Config[logStreamKey] } logCreateStream := true if info.Config[logCreateStreamKey] != "" { logCreateStream, err = strconv.ParseBool(info.Config[logCreateStreamKey]) if err != nil { return nil, err } } multilinePattern, err := parseMultilineOptions(info) if err != nil { return nil, err } containerStreamConfig := &logStreamConfig{ logStreamName: logStreamName, logGroupName: logGroupName, logCreateGroup: logCreateGroup, logCreateStream: logCreateStream, logNonBlocking: logNonBlocking, forceFlushInterval: forceFlushInterval, maxBufferedEvents: maxBufferedEvents, multilinePattern: multilinePattern, } return containerStreamConfig, nil } // Parses awslogs-multiline-pattern and awslogs-datetime-format options // If awslogs-datetime-format is present, convert the format from strftime // to regexp and return. 
// If awslogs-multiline-pattern is present, compile regexp and return func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { dateTimeFormat := info.Config[datetimeFormatKey] multilinePatternKey := info.Config[multilinePatternKey] // strftime input is parsed into a regular expression if dateTimeFormat != "" { // %. matches each strftime format sequence and ReplaceAllStringFunc // looks up each format sequence in the conversion table strftimeToRegex // to replace with a defined regular expression r := regexp.MustCompile("%.") multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { return strftimeToRegex[s] }) } if multilinePatternKey != "" { multilinePattern, err := regexp.Compile(multilinePatternKey) if err != nil { return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) } return multilinePattern, nil } return nil, nil } // Maps strftime format strings to regex var strftimeToRegex = map[string]string{ /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, /*weekdayZeroIndex */ `%w`: `[0-6]`, /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, /*yearCentury */ `%Y`: `\d{4}`, /*yearZeroPadded */ `%y`: `\d{2}`, /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, /*AM or PM */ `%p`: "[A,P]M", /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, /*secondZeroPadded */ `%S`: `[0-5][0-9]`, /*microsecondZeroPadded */ `%f`: `\d{6}`, /*utcOffset */ `%z`: `[+-]\d{4}`, /*tzName */ `%Z`: `[A-Z]{1,4}T`, /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, /*milliseconds */ `%L`: `\.\d{3}`, } // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() (regionFinder, error) { s, err := session.NewSession() if err != nil { return nil, err } return ec2metadata.New(s), nil } // newSDKEndpoint is a variable such that the implementation // can be swapped out for unit tests. var newSDKEndpoint = credentialsEndpoint // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(info logger.Info) (api, error) { var region, endpoint *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) } if info.Config[regionKey] != "" { region = aws.String(info.Config[regionKey]) } if info.Config[endpointKey] != "" { endpoint = aws.String(info.Config[endpointKey]) } if region == nil || *region == "" { logrus.Info("Trying to get region from EC2 Metadata") ec2MetadataClient, err := newRegionFinder() if err != nil { logrus.WithError(err).Error("could not create EC2 metadata client") return nil, errors.Wrap(err, "could not create EC2 metadata client") } r, err := ec2MetadataClient.Region() if err != nil { logrus.WithError(err).Error("Could not get region from EC2 metadata, environment, or log option") return nil, errors.New("Cannot determine region for awslogs driver") } region = &r } sess, err := session.NewSession() if err != nil { return nil, errors.New("Failed to create a service client session for awslogs driver") } // attach region to cloudwatchlogs config sess.Config.Region = region // attach endpoint to cloudwatchlogs config if endpoint != nil { sess.Config.Endpoint = endpoint } if uri, ok := info.Config[credentialsEndpointKey]; ok { logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }) // attach credentials to cloudwatchlogs config sess.Config.Credentials = creds } logrus.WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") client := cloudwatchlogs.New(sess) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", Fn: func(r *request.Request) { currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) r.HTTPRequest.Header.Set(userAgentHeader, fmt.Sprintf("Docker %s (%s) %s", dockerversion.Version, runtime.GOOS, currentAgent)) }, }) if info.Config[logFormatKey] != "" { client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "LogFormatHeaderHandler", Fn: func(req *request.Request) { req.HTTPRequest.Header.Set(logsFormatHeader, info.Config[logFormatKey]) }, }) } return client, nil } // Name returns the name of the awslogs logging driver func (l *logStream) Name() string { return name } // BufSize returns the maximum bytes CloudWatch can handle. 
func (l *logStream) BufSize() int { return maximumBytesPerEvent } // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() if l.closed { return errors.New("awslogs is closed") } if l.logNonBlocking { select { case l.messages <- msg: return nil default: return errors.New("awslogs buffer is full") } } l.messages <- msg return nil } // Close closes the instance of the awslogs logging driver func (l *logStream) Close() error { l.lock.Lock() defer l.lock.Unlock() if !l.closed { close(l.messages) } l.closed = true return nil } // create creates log group and log stream for the instance of the awslogs logging driver func (l *logStream) create() error { err := l.createLogStream() if err == nil { return nil } if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode && l.logCreateGroup { if err := l.createLogGroup(); err != nil { return errors.Wrap(err, "failed to create Cloudwatch log group") } err = l.createLogStream() if err == nil { return nil } } return errors.Wrap(err, "failed to create Cloudwatch log stream") } // createLogGroup creates a log group for the instance of the awslogs logging driver func (l *logStream) createLogGroup() error { if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ LogGroupName: aws.String(l.logGroupName), }); err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logCreateGroup": l.logCreateGroup, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log group already exists") return nil } logrus.WithFields(fields).Error("Failed to create log group") } return err } return nil } // createLogStream creates a log stream for the instance of the awslogs logging driver func (l *logStream) createLogStream() error { // Directly return if we do not want to create log stream. if !l.logCreateStream { logrus.WithFields(logrus.Fields{ "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, "logCreateStream": l.logCreateStream, }).Info("Skipping creating log stream") return nil } input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } _, err := l.client.CreateLogStream(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log stream already exists") return nil } logrus.WithFields(fields).Error("Failed to create log stream") } } return err } // newTicker is used for time-based batching. newTicker is a variable such // that the implementation can be swapped out for unit tests. var newTicker = func(freq time.Duration) *time.Ticker { return time.NewTicker(freq) } // collectBatch executes as a goroutine to perform batching of log events for // submission to the log stream. 
If the awslogs-multiline-pattern or // awslogs-datetime-format options have been configured, multiline processing // is enabled, where log messages are stored in an event buffer until a multiline // pattern match is found, at which point the messages in the event buffer are // pushed to CloudWatch logs as a single log event. Multiline messages are processed // according to the maximumBytesPerPut constraint, and the implementation only // allows for messages to be buffered for a maximum of 2*batchPublishFrequency // seconds. When events are ready to be processed for submission to CloudWatch // Logs, the processEvents method is called. If a multiline pattern is not // configured, log events are submitted to the processEvents method immediately. func (l *logStream) collectBatch(created chan bool) { // Wait for the logstream/group to be created <-created flushInterval := l.forceFlushInterval if flushInterval <= 0 { flushInterval = defaultForceFlushInterval } ticker := newTicker(flushInterval) var eventBuffer []byte var eventBufferTimestamp int64 var batch = newEventBatch() for { select { case t := <-ticker.C: // If event buffer is older than batch publish frequency flush the event buffer if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp eventBufferExpired := eventBufferAge >= int64(flushInterval)/int64(time.Millisecond) eventBufferNegative := eventBufferAge < 0 if eventBufferExpired || eventBufferNegative { l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBuffer = eventBuffer[:0] } } l.publishBatch(batch) batch.reset() case msg, more := <-l.messages: if !more { // Flush event buffer and release resources l.processEvent(batch, eventBuffer, eventBufferTimestamp) l.publishBatch(batch) batch.reset() return } if eventBufferTimestamp == 0 { eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) } line := msg.Line if l.multilinePattern != nil { lineEffectiveLen := effectiveLen(string(line)) if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent { // This is a new log event or we will exceed max bytes per event // so flush the current eventBuffer to events and reset timestamp l.processEvent(batch, eventBuffer, eventBufferTimestamp) eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) eventBuffer = eventBuffer[:0] } // Append newline if event is less than max event size if lineEffectiveLen < maximumBytesPerEvent { line = append(line, "\n"...) } eventBuffer = append(eventBuffer, line...) logger.PutMessage(msg) } else { l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) logger.PutMessage(msg) } } } } // processEvent processes log events that are ready for submission to CloudWatch // logs. Batching is performed on time- and size-bases. Time-based batching // occurs at a 5 second interval (defined in the batchPublishFrequency const). // Size-based batching is performed on the maximum number of events per batch // (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a // batch (defined in maximumBytesPerPut). Log messages are split by the maximum // bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event // byte overhead (defined in perEventBytes) which is accounted for in split- and // batch-calculations. 
Because the events are interpreted as UTF-8 encoded // Unicode, invalid UTF-8 byte sequences are replaced with the Unicode // replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To // compensate for that and to avoid splitting valid UTF-8 characters into // invalid byte sequences, we calculate the length of each event assuming that // this replacement happens. func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) { for len(bytes) > 0 { // Split line length so it does not exceed the maximum splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent) line := bytes[:splitOffset] event := wrappedEvent{ inputLogEvent: &cloudwatchlogs.InputLogEvent{ Message: aws.String(string(line)), Timestamp: aws.Int64(timestamp), }, insertOrder: batch.count(), } added := batch.add(event, lineBytes) if added { bytes = bytes[splitOffset:] } else { l.publishBatch(batch) batch.reset() } } } // effectiveLen counts the effective number of bytes in the string, after // UTF-8 normalization. UTF-8 normalization includes replacing bytes that do // not constitute valid UTF-8 encoded Unicode codepoints with the Unicode // replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as // utf8.RuneError) func effectiveLen(line string) int { effectiveBytes := 0 for _, rune := range line { effectiveBytes += utf8.RuneLen(rune) } return effectiveBytes } // findValidSplit finds the byte offset to split a string without breaking valid // Unicode codepoints given a maximum number of total bytes. findValidSplit // returns the byte offset for splitting a string or []byte, as well as the // effective number of bytes if the string were normalized to replace invalid // UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8 // sequence, represented in Go as utf8.RuneError) func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) { for offset, rune := range line { splitOffset = offset if effectiveBytes+utf8.RuneLen(rune) > maxBytes { return splitOffset, effectiveBytes } effectiveBytes += utf8.RuneLen(rune) } splitOffset = len(line) return } // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). 
func (l *logStream) publishBatch(batch *eventBatch) { if batch.isEmpty() { return } cwEvents := unwrapEvents(batch.events()) nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dataAlreadyAcceptedCode { // already submitted, just grab the correct sequence token parts := strings.Split(awsErr.Message(), " ") nextSequenceToken = &parts[len(parts)-1] logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Info("Data already accepted, ignoring error") err = nil } else if awsErr.Code() == invalidSequenceTokenCode { // sequence code is bad, grab the correct one and retry parts := strings.Split(awsErr.Message(), " ") token := parts[len(parts)-1] nextSequenceToken, err = l.putLogEvents(cwEvents, &token) } } } if err != nil { logrus.Error(err) } else { l.sequenceToken = nextSequenceToken } } // putLogEvents wraps the PutLogEvents API func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: events, SequenceToken: sequenceToken, LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } resp, err := l.client.PutLogEvents(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Error("Failed to put log events") } return nil, err } return resp.NextSequenceToken, nil } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, awslogs-endpoint // awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, // awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: case logCreateGroupKey: case regionKey: case endpointKey: case tagKey: case datetimeFormatKey: case multilinePatternKey: case credentialsEndpointKey: case forceFlushIntervalKey: case maxBufferedEventsKey: case logFormatKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } } if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } if cfg[logCreateGroupKey] != "" { if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) } } if cfg[forceFlushIntervalKey] != "" { if value, err := strconv.Atoi(cfg[forceFlushIntervalKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", forceFlushIntervalKey, cfg[forceFlushIntervalKey]) } } if cfg[maxBufferedEventsKey] != "" { if value, err := strconv.Atoi(cfg[maxBufferedEventsKey]); err != nil || value <= 0 { return fmt.Errorf("must specify a positive integer for log opt '%s': %v", maxBufferedEventsKey, cfg[maxBufferedEventsKey]) } } _, datetimeFormatKeyExists := cfg[datetimeFormatKey] _, multilinePatternKeyExists := cfg[multilinePatternKey] if datetimeFormatKeyExists && multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) } if cfg[logFormatKey] != "" { // For now, only the "json/emf" log format is supported if cfg[logFormatKey] != 
jsonEmfLogFormat { return fmt.Errorf("unsupported log format '%s'", cfg[logFormatKey]) } if datetimeFormatKeyExists || multilinePatternKeyExists { return fmt.Errorf("you cannot configure log opt '%s' or '%s' when log opt '%s' is set to '%s'", datetimeFormatKey, multilinePatternKey, logFormatKey, jsonEmfLogFormat) } } return nil } // Len returns the length of a byTimestamp slice. Len is required by the // sort.Interface interface. func (slice byTimestamp) Len() int { return len(slice) } // Less compares two values in a byTimestamp slice by Timestamp. Less is // required by the sort.Interface interface. func (slice byTimestamp) Less(i, j int) bool { iTimestamp, jTimestamp := int64(0), int64(0) if slice != nil && slice[i].inputLogEvent.Timestamp != nil { iTimestamp = *slice[i].inputLogEvent.Timestamp } if slice != nil && slice[j].inputLogEvent.Timestamp != nil { jTimestamp = *slice[j].inputLogEvent.Timestamp } if iTimestamp == jTimestamp { return slice[i].insertOrder < slice[j].insertOrder } return iTimestamp < jTimestamp } // Swap swaps two values in a byTimestamp slice with each other. Swap is // required by the sort.Interface interface. func (slice byTimestamp) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) for i, input := range events { cwEvents[i] = input.inputLogEvent } return cwEvents } func newEventBatch() *eventBatch { return &eventBatch{ batch: make([]wrappedEvent, 0), bytes: 0, } } // events returns a slice of wrappedEvents sorted in order of their // timestamps and then by their insertion order (see `byTimestamp`). // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) events() []wrappedEvent { sort.Sort(byTimestamp(b.batch)) return b.batch } // add adds an event to the batch of events accounting for the // necessary overhead for an event to be logged. An error will be // returned if the event cannot be added to the batch due to service // limits. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) add(event wrappedEvent, size int) bool { addBytes := size + perEventBytes // verify we are still within service limits switch { case len(b.batch)+1 > maximumLogEventsPerPut: return false case b.bytes+addBytes > maximumBytesPerPut: return false } b.bytes += addBytes b.batch = append(b.batch, event) return true } // count is the number of batched events. Warning: this method // is not threadsafe and must not be used concurrently. func (b *eventBatch) count() int { return len(b.batch) } // size is the total number of bytes that the batch represents. // // Warning: this method is not threadsafe and must not be used // concurrently. func (b *eventBatch) size() int { return b.bytes } func (b *eventBatch) isEmpty() bool { zeroEvents := b.count() == 0 zeroSize := b.size() == 0 return zeroEvents && zeroSize } // reset prepares the batch for reuse. func (b *eventBatch) reset() { b.bytes = 0 b.batch = b.batch[:0] }
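The effectiveLen and findValidSplit helpers above count invalid UTF-8 bytes as if they had already been replaced by the 3-byte replacement rune U+FFFD. A small standalone sketch (separate from the driver; the demo strings are made up for illustration) shows that accounting in isolation:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// effectiveLen mirrors the unexported helper in the driver code above:
// it sums the UTF-8 width of each rune, so an invalid byte is counted as
// the 3-byte replacement rune U+FFFD rather than as a single byte.
func effectiveLen(line string) int {
	n := 0
	for _, r := range line {
		n += utf8.RuneLen(r)
	}
	return n
}

func main() {
	valid := "hello"
	invalid := "hello\xff" // the trailing 0xff byte is not valid UTF-8

	fmt.Println(len(valid), effectiveLen(valid))     // 5 5
	fmt.Println(len(invalid), effectiveLen(invalid)) // 6 8: 0xff is counted as 3 bytes
}
```

This is why the driver splits events by the "effective" length rather than len(): once CloudWatch normalizes the payload, the replacement characters occupy more bytes than the original invalid ones.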
sanjams2
b46ab1f57997feed98aa2cccd8e4f5a612b963bc
787b8fe14f34f04f723bd0913f293a59be428fe6
That's a great call. I actually didn't spend a lot of time investigating the other configurations and parameters of the aws log driver, so thanks for calling this one out; I think it makes total sense.
sanjams2
4,451
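The validation added in this change rejects combining `awslogs-format=json/emf` with a datetime format or a multiline pattern. The following is a minimal sketch of a hypothetical caller of the exported ValidateLogOpt function shown above; it assumes the moby codebase is available as a Go dependency, and the import path is taken from the package clause in the file:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/logger/awslogs"
)

func main() {
	// awslogs-format=json/emf cannot be combined with a datetime format
	// (or a multiline pattern), per the validation shown above.
	cfg := map[string]string{
		"awslogs-group":           "my-log-group",
		"awslogs-format":          "json/emf",
		"awslogs-datetime-format": "%Y-%m-%d",
	}
	if err := awslogs.ValidateLogOpt(cfg); err != nil {
		fmt.Println("rejected:", err)
	}

	// Dropping the datetime format leaves a valid combination.
	delete(cfg, "awslogs-datetime-format")
	fmt.Println("valid:", awslogs.ValidateLogOpt(cfg) == nil)
}
```

The first call returns the "you cannot configure log opt ..." error from ValidateLogOpt; after removing the datetime format, the remaining options validate cleanly.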
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
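For reference, the follow-up format flagged at the top of this description (PR 43448) nests the three options under a `proxies` key. A sketch of such a daemon.json, with placeholder proxy addresses rather than values taken from the PR:

```json
{
  "proxies": {
    "http-proxy": "http://proxy.example.com:3128",
    "https-proxy": "http://proxy.example.com:3128",
    "no-proxy": "localhost,127.0.0.1"
  }
}
```

As described above, values set in the configuration file take precedence over HTTP_PROXY/HTTPS_PROXY/NO_PROXY in the daemon's environment, and setting the same option both in the file and on the command line produces an error when the daemon starts.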
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
cmd/dockerd/config.go
package main import ( "runtime" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/config" "github.com/docker/docker/opts" "github.com/docker/docker/plugin/executor/containerd" "github.com/docker/docker/registry" "github.com/spf13/pflag" ) const ( // defaultShutdownTimeout is the default shutdown timeout for the daemon defaultShutdownTimeout = 15 // defaultTrustKeyFile is the default filename for the trust key defaultTrustKeyFile = "key.json" ) // installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) error { var maxConcurrentDownloads, maxConcurrentUploads, maxDownloadAttempts int defaultPidFile, err := getDefaultPidFile() if err != nil { return err } defaultDataRoot, err := getDefaultDataRoot() if err != nil { return err } defaultExecRoot, err := getDefaultExecRoot() if err != nil { return err } installRegistryServiceFlags(&conf.ServiceOptions, flags) flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options") flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options") flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime") flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") flags.StringVar(&conf.ContainerdAddr, "containerd", "", "containerd grpc address") flags.BoolVar(&conf.CriContainerd, "cri-containerd", false, "start containerd with cri") // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added // before Docker 1.0, so won't be removed, only hidden, to discourage its usage. _ = flags.MarkHidden("graph") flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state") flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") _ = flags.MarkDeprecated("restart", "Please use a restart policy on docker run") // Windows doesn't support setting the storage driver - there is no choice as to which ones to use. if runtime.GOOS != "windows" { flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") } flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") flags.Var(opts.NewIPOpt(&conf.HostGatewayIP, ""), "host-gateway-ip", "IP address that the special 'host-gateway' string in --add-host resolves to. 
Defaults to the IP address of the default bridge") flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") _ = flags.MarkDeprecated("cluster-advertise", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") _ = flags.MarkDeprecated("cluster-store", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") _ = flags.MarkDeprecated("cluster-store-opt", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") flags.IntVar(&maxDownloadAttempts, "max-download-attempts", config.DefaultDownloadAttempts, "Set the max download attempts for each pull") flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server") _ = flags.MarkHidden("network-diagnostic-port") flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") flags.Var(opts.NewNamedListOptsRef("node-generic-resources", &conf.NodeGenericResources, opts.ValidateSingleGenericResource), "node-generic-resource", "Advertise user-defined resource") flags.IntVar(&conf.NetworkControlPlaneMTU, "network-control-plane-mtu", config.DefaultNetworkMtu, "Network Control plane MTU") conf.MaxConcurrentDownloads = &maxConcurrentDownloads conf.MaxConcurrentUploads = &maxConcurrentUploads conf.MaxDownloadAttempts = &maxDownloadAttempts flags.StringVar(&conf.ContainerdNamespace, "containerd-namespace", daemon.ContainersNamespace, "Containerd namespace to use") flags.StringVar(&conf.ContainerdPluginNamespace, "containerd-plugins-namespace", containerd.PluginNamespace, "Containerd namespace to use for plugins") flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") return nil } func installRegistryServiceFlags(options *registry.ServiceOptions, flags *pflag.FlagSet) { ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, registry.ValidateIndexName) mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, registry.ValidateMirror) insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", 
&options.InsecureRegistries, registry.ValidateIndexName) flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") }
package main import ( "runtime" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/config" "github.com/docker/docker/opts" "github.com/docker/docker/plugin/executor/containerd" "github.com/docker/docker/registry" "github.com/spf13/pflag" ) const ( // defaultShutdownTimeout is the default shutdown timeout for the daemon defaultShutdownTimeout = 15 // defaultTrustKeyFile is the default filename for the trust key defaultTrustKeyFile = "key.json" ) // installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) error { var maxConcurrentDownloads, maxConcurrentUploads, maxDownloadAttempts int defaultPidFile, err := getDefaultPidFile() if err != nil { return err } defaultDataRoot, err := getDefaultDataRoot() if err != nil { return err } defaultExecRoot, err := getDefaultExecRoot() if err != nil { return err } installRegistryServiceFlags(&conf.ServiceOptions, flags) flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options") flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options") flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime") flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") flags.StringVar(&conf.ContainerdAddr, "containerd", "", "containerd grpc address") flags.BoolVar(&conf.CriContainerd, "cri-containerd", false, "start containerd with cri") // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added // before Docker 1.0, so won't be removed, only hidden, to discourage its usage. _ = flags.MarkHidden("graph") flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state") flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") _ = flags.MarkDeprecated("restart", "Please use a restart policy on docker run") // Windows doesn't support setting the storage driver - there is no choice as to which ones to use. if runtime.GOOS != "windows" { flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") } flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") flags.Var(opts.NewIPOpt(&conf.HostGatewayIP, ""), "host-gateway-ip", "IP address that the special 'host-gateway' string in --add-host resolves to. 
Defaults to the IP address of the default bridge") flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") _ = flags.MarkDeprecated("cluster-advertise", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") _ = flags.MarkDeprecated("cluster-store", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") _ = flags.MarkDeprecated("cluster-store-opt", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") flags.IntVar(&maxDownloadAttempts, "max-download-attempts", config.DefaultDownloadAttempts, "Set the max download attempts for each pull") flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server") _ = flags.MarkHidden("network-diagnostic-port") flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") flags.Var(opts.NewNamedListOptsRef("node-generic-resources", &conf.NodeGenericResources, opts.ValidateSingleGenericResource), "node-generic-resource", "Advertise user-defined resource") flags.IntVar(&conf.NetworkControlPlaneMTU, "network-control-plane-mtu", config.DefaultNetworkMtu, "Network Control plane MTU") conf.MaxConcurrentDownloads = &maxConcurrentDownloads conf.MaxConcurrentUploads = &maxConcurrentUploads conf.MaxDownloadAttempts = &maxDownloadAttempts flags.StringVar(&conf.ContainerdNamespace, "containerd-namespace", daemon.ContainersNamespace, "Containerd namespace to use") flags.StringVar(&conf.ContainerdPluginNamespace, "containerd-plugins-namespace", containerd.PluginNamespace, "Containerd namespace to use for plugins") flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") flags.StringVar(&conf.HTTPProxy, "http-proxy", "", "HTTP proxy URL to use for outgoing traffic") flags.StringVar(&conf.HTTPSProxy, "https-proxy", "", "HTTPS proxy URL to use for outgoing traffic") flags.StringVar(&conf.NoProxy, "no-proxy", "", "Comma-separated list of hosts or IP addresses for which the proxy is skipped") return nil } func installRegistryServiceFlags(options *registry.ServiceOptions, flags 
*pflag.FlagSet) { ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, registry.ValidateIndexName) mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, registry.ValidateMirror) insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, registry.ValidateIndexName) flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") }
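The three flags registered above (`--http-proxy`, `--https-proxy`, `--no-proxy`) can also be set directly on the daemon command line. A sketch with placeholder proxy addresses:

```bash
dockerd \
  --http-proxy "http://proxy.example.com:3128" \
  --https-proxy "http://proxy.example.com:3128" \
  --no-proxy "localhost,127.0.0.1"
```

Per the PR description, these flags conflict with the equivalent daemon.json keys rather than overriding them: supplying both causes the daemon to fail at startup.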
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
should we remove the `user:pass` from the examples here? Not sure it's something we really want to _encourage_ (I know some setups use it, but it's not "ideal")
thaJeztah
4,452
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
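The description above attributes the lack of live reload to Go caching proxy settings behind `sync.Once`. A minimal sketch of that behaviour using only the standard library (illustrative, not the daemon's code): the first call to `http.ProxyFromEnvironment` snapshots `HTTP_PROXY`/`HTTPS_PROXY`/`NO_PROXY`, so later changes to the environment are ignored until the process restarts.

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

// net/http reads the proxy environment variables only once per process (the
// lookup is guarded by a sync.Once) and caches the result, which is why a
// changed proxy configuration only takes effect after a restart.
func main() {
	os.Setenv("HTTP_PROXY", "http://first-proxy.example.com:3128")
	req, _ := http.NewRequest(http.MethodGet, "http://registry-1.docker.io/v2/", nil)

	first, _ := http.ProxyFromEnvironment(req)
	fmt.Println("first lookup: ", first) // first-proxy.example.com is used

	// Changing the variable after the first lookup has no effect in this process.
	os.Setenv("HTTP_PROXY", "http://second-proxy.example.com:3128")
	second, _ := http.ProxyFromEnvironment(req)
	fmt.Println("second lookup:", second) // still first-proxy.example.com
}
```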
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
cmd/dockerd/config.go
package main import ( "runtime" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/config" "github.com/docker/docker/opts" "github.com/docker/docker/plugin/executor/containerd" "github.com/docker/docker/registry" "github.com/spf13/pflag" ) const ( // defaultShutdownTimeout is the default shutdown timeout for the daemon defaultShutdownTimeout = 15 // defaultTrustKeyFile is the default filename for the trust key defaultTrustKeyFile = "key.json" ) // installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) error { var maxConcurrentDownloads, maxConcurrentUploads, maxDownloadAttempts int defaultPidFile, err := getDefaultPidFile() if err != nil { return err } defaultDataRoot, err := getDefaultDataRoot() if err != nil { return err } defaultExecRoot, err := getDefaultExecRoot() if err != nil { return err } installRegistryServiceFlags(&conf.ServiceOptions, flags) flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options") flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options") flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime") flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") flags.StringVar(&conf.ContainerdAddr, "containerd", "", "containerd grpc address") flags.BoolVar(&conf.CriContainerd, "cri-containerd", false, "start containerd with cri") // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added // before Docker 1.0, so won't be removed, only hidden, to discourage its usage. _ = flags.MarkHidden("graph") flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state") flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") _ = flags.MarkDeprecated("restart", "Please use a restart policy on docker run") // Windows doesn't support setting the storage driver - there is no choice as to which ones to use. if runtime.GOOS != "windows" { flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") } flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") flags.Var(opts.NewIPOpt(&conf.HostGatewayIP, ""), "host-gateway-ip", "IP address that the special 'host-gateway' string in --add-host resolves to. 
Defaults to the IP address of the default bridge") flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") _ = flags.MarkDeprecated("cluster-advertise", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") _ = flags.MarkDeprecated("cluster-store", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") _ = flags.MarkDeprecated("cluster-store-opt", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") flags.IntVar(&maxDownloadAttempts, "max-download-attempts", config.DefaultDownloadAttempts, "Set the max download attempts for each pull") flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server") _ = flags.MarkHidden("network-diagnostic-port") flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") flags.Var(opts.NewNamedListOptsRef("node-generic-resources", &conf.NodeGenericResources, opts.ValidateSingleGenericResource), "node-generic-resource", "Advertise user-defined resource") flags.IntVar(&conf.NetworkControlPlaneMTU, "network-control-plane-mtu", config.DefaultNetworkMtu, "Network Control plane MTU") conf.MaxConcurrentDownloads = &maxConcurrentDownloads conf.MaxConcurrentUploads = &maxConcurrentUploads conf.MaxDownloadAttempts = &maxDownloadAttempts flags.StringVar(&conf.ContainerdNamespace, "containerd-namespace", daemon.ContainersNamespace, "Containerd namespace to use") flags.StringVar(&conf.ContainerdPluginNamespace, "containerd-plugins-namespace", containerd.PluginNamespace, "Containerd namespace to use for plugins") flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") return nil } func installRegistryServiceFlags(options *registry.ServiceOptions, flags *pflag.FlagSet) { ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, registry.ValidateIndexName) mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, registry.ValidateMirror) insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", 
&options.InsecureRegistries, registry.ValidateIndexName) flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") }
package main import ( "runtime" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/config" "github.com/docker/docker/opts" "github.com/docker/docker/plugin/executor/containerd" "github.com/docker/docker/registry" "github.com/spf13/pflag" ) const ( // defaultShutdownTimeout is the default shutdown timeout for the daemon defaultShutdownTimeout = 15 // defaultTrustKeyFile is the default filename for the trust key defaultTrustKeyFile = "key.json" ) // installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) error { var maxConcurrentDownloads, maxConcurrentUploads, maxDownloadAttempts int defaultPidFile, err := getDefaultPidFile() if err != nil { return err } defaultDataRoot, err := getDefaultDataRoot() if err != nil { return err } defaultExecRoot, err := getDefaultExecRoot() if err != nil { return err } installRegistryServiceFlags(&conf.ServiceOptions, flags) flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options") flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options") flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime") flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") flags.StringVar(&conf.ContainerdAddr, "containerd", "", "containerd grpc address") flags.BoolVar(&conf.CriContainerd, "cri-containerd", false, "start containerd with cri") // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added // before Docker 1.0, so won't be removed, only hidden, to discourage its usage. _ = flags.MarkHidden("graph") flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state") flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") _ = flags.MarkDeprecated("restart", "Please use a restart policy on docker run") // Windows doesn't support setting the storage driver - there is no choice as to which ones to use. if runtime.GOOS != "windows" { flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") } flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") flags.Var(opts.NewIPOpt(&conf.HostGatewayIP, ""), "host-gateway-ip", "IP address that the special 'host-gateway' string in --add-host resolves to. 
Defaults to the IP address of the default bridge") flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") _ = flags.MarkDeprecated("cluster-advertise", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") _ = flags.MarkDeprecated("cluster-store", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") _ = flags.MarkDeprecated("cluster-store-opt", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") flags.IntVar(&maxDownloadAttempts, "max-download-attempts", config.DefaultDownloadAttempts, "Set the max download attempts for each pull") flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server") _ = flags.MarkHidden("network-diagnostic-port") flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") flags.Var(opts.NewNamedListOptsRef("node-generic-resources", &conf.NodeGenericResources, opts.ValidateSingleGenericResource), "node-generic-resource", "Advertise user-defined resource") flags.IntVar(&conf.NetworkControlPlaneMTU, "network-control-plane-mtu", config.DefaultNetworkMtu, "Network Control plane MTU") conf.MaxConcurrentDownloads = &maxConcurrentDownloads conf.MaxConcurrentUploads = &maxConcurrentUploads conf.MaxDownloadAttempts = &maxDownloadAttempts flags.StringVar(&conf.ContainerdNamespace, "containerd-namespace", daemon.ContainersNamespace, "Containerd namespace to use") flags.StringVar(&conf.ContainerdPluginNamespace, "containerd-plugins-namespace", containerd.PluginNamespace, "Containerd namespace to use for plugins") flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") flags.StringVar(&conf.HTTPProxy, "http-proxy", "", "HTTP proxy URL to use for outgoing traffic") flags.StringVar(&conf.HTTPSProxy, "https-proxy", "", "HTTPS proxy URL to use for outgoing traffic") flags.StringVar(&conf.NoProxy, "no-proxy", "", "Comma-separated list of hosts or IP addresses for which the proxy is skipped") return nil } func installRegistryServiceFlags(options *registry.ServiceOptions, flags 
*pflag.FlagSet) { ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, registry.ValidateIndexName) mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, registry.ValidateMirror) insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, registry.ValidateIndexName) flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
I guess the theory was probably to loosely document the syntax for users who need authentication, but I agree with your suggestion -- we probably don't want to encourage it, and users who need it will likely try this syntax first anyhow.
tianon
4,453
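The PR description states that command-line flags and daemon.json take precedence over the daemon's environment variables, and that setting the same option both as a flag and in daemon.json is a startup error. A rough sketch of that precedence rule follows; the helper name and error text are assumptions for illustration, not the code added by this PR:

```go
package main

import (
	"fmt"
	"os"
)

// resolveProxySetting is a hypothetical illustration of the precedence the
// description spells out: a flag and a daemon.json value for the same option
// conflict, either one overrides the environment, and the environment is the
// fallback.
func resolveProxySetting(name, flagValue, fileValue string) (string, error) {
	switch {
	case flagValue != "" && fileValue != "":
		return "", fmt.Errorf("%s is set both on the command line and in the configuration file", name)
	case flagValue != "":
		return flagValue, nil
	case fileValue != "":
		return fileValue, nil
	default:
		return os.Getenv(name), nil // fall back to HTTP_PROXY / HTTPS_PROXY / NO_PROXY
	}
}

func main() {
	os.Setenv("HTTP_PROXY", "http://env-proxy.example.com:3128")

	// daemon.json sets a proxy, no flag given: the file value wins over the environment.
	v, err := resolveProxySetting("HTTP_PROXY", "", "http://proxytest.example.com:80")
	fmt.Println(v, err)

	// Both a flag and a daemon.json value: startup error.
	_, err = resolveProxySetting("HTTP_PROXY", "http://flag-proxy.example.com:80", "http://proxytest.example.com:80")
	fmt.Println(err)
}
```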
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both a command-line flag and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=. 
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
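The note at the top of the description points to a follow-up that nests the settings under a `"proxies"` key in daemon.json. A hypothetical Go struct showing how such a nested block could be decoded; the field and type names are assumptions for illustration, not the daemon's actual configuration types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// proxyConfig is a hypothetical struct mirroring the follow-up daemon.json
// layout, where the proxy settings live under a "proxies" key.
type proxyConfig struct {
	Proxies struct {
		HTTPProxy  string `json:"http-proxy"`
		HTTPSProxy string `json:"https-proxy"`
		NoProxy    string `json:"no-proxy"`
	} `json:"proxies"`
}

func main() {
	data := []byte(`{
	  "proxies": {
	    "http-proxy": "http://proxytest.example.com:80",
	    "https-proxy": "https://proxytest.example.com:443",
	    "no-proxy": "localhost,127.0.0.1"
	  }
	}`)

	var cfg proxyConfig
	if err := json.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Proxies.HTTPProxy) // http://proxytest.example.com:80
}
```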
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
cmd/dockerd/config.go
package main import ( "runtime" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/config" "github.com/docker/docker/opts" "github.com/docker/docker/plugin/executor/containerd" "github.com/docker/docker/registry" "github.com/spf13/pflag" ) const ( // defaultShutdownTimeout is the default shutdown timeout for the daemon defaultShutdownTimeout = 15 // defaultTrustKeyFile is the default filename for the trust key defaultTrustKeyFile = "key.json" ) // installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) error { var maxConcurrentDownloads, maxConcurrentUploads, maxDownloadAttempts int defaultPidFile, err := getDefaultPidFile() if err != nil { return err } defaultDataRoot, err := getDefaultDataRoot() if err != nil { return err } defaultExecRoot, err := getDefaultExecRoot() if err != nil { return err } installRegistryServiceFlags(&conf.ServiceOptions, flags) flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options") flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options") flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime") flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") flags.StringVar(&conf.ContainerdAddr, "containerd", "", "containerd grpc address") flags.BoolVar(&conf.CriContainerd, "cri-containerd", false, "start containerd with cri") // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added // before Docker 1.0, so won't be removed, only hidden, to discourage its usage. _ = flags.MarkHidden("graph") flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state") flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") _ = flags.MarkDeprecated("restart", "Please use a restart policy on docker run") // Windows doesn't support setting the storage driver - there is no choice as to which ones to use. if runtime.GOOS != "windows" { flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") } flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") flags.Var(opts.NewIPOpt(&conf.HostGatewayIP, ""), "host-gateway-ip", "IP address that the special 'host-gateway' string in --add-host resolves to. 
Defaults to the IP address of the default bridge") flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") _ = flags.MarkDeprecated("cluster-advertise", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") _ = flags.MarkDeprecated("cluster-store", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") _ = flags.MarkDeprecated("cluster-store-opt", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") flags.IntVar(&maxDownloadAttempts, "max-download-attempts", config.DefaultDownloadAttempts, "Set the max download attempts for each pull") flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server") _ = flags.MarkHidden("network-diagnostic-port") flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") flags.Var(opts.NewNamedListOptsRef("node-generic-resources", &conf.NodeGenericResources, opts.ValidateSingleGenericResource), "node-generic-resource", "Advertise user-defined resource") flags.IntVar(&conf.NetworkControlPlaneMTU, "network-control-plane-mtu", config.DefaultNetworkMtu, "Network Control plane MTU") conf.MaxConcurrentDownloads = &maxConcurrentDownloads conf.MaxConcurrentUploads = &maxConcurrentUploads conf.MaxDownloadAttempts = &maxDownloadAttempts flags.StringVar(&conf.ContainerdNamespace, "containerd-namespace", daemon.ContainersNamespace, "Containerd namespace to use") flags.StringVar(&conf.ContainerdPluginNamespace, "containerd-plugins-namespace", containerd.PluginNamespace, "Containerd namespace to use for plugins") flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") return nil } func installRegistryServiceFlags(options *registry.ServiceOptions, flags *pflag.FlagSet) { ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, registry.ValidateIndexName) mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, registry.ValidateMirror) insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", 
&options.InsecureRegistries, registry.ValidateIndexName) flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") }
package main import ( "runtime" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/config" "github.com/docker/docker/opts" "github.com/docker/docker/plugin/executor/containerd" "github.com/docker/docker/registry" "github.com/spf13/pflag" ) const ( // defaultShutdownTimeout is the default shutdown timeout for the daemon defaultShutdownTimeout = 15 // defaultTrustKeyFile is the default filename for the trust key defaultTrustKeyFile = "key.json" ) // installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) error { var maxConcurrentDownloads, maxConcurrentUploads, maxDownloadAttempts int defaultPidFile, err := getDefaultPidFile() if err != nil { return err } defaultDataRoot, err := getDefaultDataRoot() if err != nil { return err } defaultExecRoot, err := getDefaultExecRoot() if err != nil { return err } installRegistryServiceFlags(&conf.ServiceOptions, flags) flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options") flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options") flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime") flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") flags.StringVar(&conf.ContainerdAddr, "containerd", "", "containerd grpc address") flags.BoolVar(&conf.CriContainerd, "cri-containerd", false, "start containerd with cri") // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added // before Docker 1.0, so won't be removed, only hidden, to discourage its usage. _ = flags.MarkHidden("graph") flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state") flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") _ = flags.MarkDeprecated("restart", "Please use a restart policy on docker run") // Windows doesn't support setting the storage driver - there is no choice as to which ones to use. if runtime.GOOS != "windows" { flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") } flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") flags.Var(opts.NewIPOpt(&conf.HostGatewayIP, ""), "host-gateway-ip", "IP address that the special 'host-gateway' string in --add-host resolves to. 
Defaults to the IP address of the default bridge") flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") _ = flags.MarkDeprecated("cluster-advertise", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") _ = flags.MarkDeprecated("cluster-store", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") _ = flags.MarkDeprecated("cluster-store-opt", "Swarm classic is deprecated. Please use Swarm-mode (docker swarm init)") flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") flags.IntVar(&maxDownloadAttempts, "max-download-attempts", config.DefaultDownloadAttempts, "Set the max download attempts for each pull") flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server") _ = flags.MarkHidden("network-diagnostic-port") flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") flags.Var(opts.NewNamedListOptsRef("node-generic-resources", &conf.NodeGenericResources, opts.ValidateSingleGenericResource), "node-generic-resource", "Advertise user-defined resource") flags.IntVar(&conf.NetworkControlPlaneMTU, "network-control-plane-mtu", config.DefaultNetworkMtu, "Network Control plane MTU") conf.MaxConcurrentDownloads = &maxConcurrentDownloads conf.MaxConcurrentUploads = &maxConcurrentUploads conf.MaxDownloadAttempts = &maxDownloadAttempts flags.StringVar(&conf.ContainerdNamespace, "containerd-namespace", daemon.ContainersNamespace, "Containerd namespace to use") flags.StringVar(&conf.ContainerdPluginNamespace, "containerd-plugins-namespace", containerd.PluginNamespace, "Containerd namespace to use for plugins") flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") flags.StringVar(&conf.HTTPProxy, "http-proxy", "", "HTTP proxy URL to use for outgoing traffic") flags.StringVar(&conf.HTTPSProxy, "https-proxy", "", "HTTPS proxy URL to use for outgoing traffic") flags.StringVar(&conf.NoProxy, "no-proxy", "", "Comma-separated list of hosts or IP addresses for which the proxy is skipped") return nil } func installRegistryServiceFlags(options *registry.ServiceOptions, flags 
*pflag.FlagSet) { ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, registry.ValidateIndexName) mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, registry.ValidateMirror) insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, registry.ValidateIndexName) flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
```suggestion flags.StringVar(&conf.NoProxy, "no-proxy", "", "Comma-separated list of hosts or IP addresses for which the proxy is skipped") ```
tianon
4,454
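The suggested help text describes `--no-proxy` as a comma-separated list of hosts or IP addresses for which the proxy is skipped. A small, hypothetical sketch of checking a destination host against such a list (exact matches only; Go's real `NO_PROXY` handling also understands domain suffixes and CIDR ranges, so this is purely illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// skipProxy is a hypothetical illustration of matching a destination host
// against a comma-separated no-proxy list such as
// "localhost,127.0.0.1,registry.internal.example.com".
func skipProxy(noProxy, host string) bool {
	for _, entry := range strings.Split(noProxy, ",") {
		entry = strings.TrimSpace(entry)
		if entry == "" {
			continue
		}
		if strings.EqualFold(entry, host) {
			return true
		}
	}
	return false
}

func main() {
	noProxy := "localhost,127.0.0.1,registry.internal.example.com"
	fmt.Println(skipProxy(noProxy, "registry.internal.example.com")) // true
	fmt.Println(skipProxy(noProxy, "registry-1.docker.io"))          // false
}
```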
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both a command-line flag and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=. 
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } configureProxyEnv(cli.Config) warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil } func configureProxyEnv(conf *config.Config) { if p := conf.HTTPProxy; p != "" { overrideProxyEnv("HTTP_PROXY", p) overrideProxyEnv("http_proxy", p) } if p := conf.HTTPSProxy; p != "" { overrideProxyEnv("HTTPS_PROXY", p) overrideProxyEnv("https_proxy", p) } if p := conf.NoProxy; p != "" { overrideProxyEnv("NO_PROXY", p) overrideProxyEnv("no_proxy", p) } } func overrideProxyEnv(name, val 
string) { if oldVal := os.Getenv(name); oldVal != "" && oldVal != val { logrus.WithFields(logrus.Fields{ "name": name, "old-value": config.MaskCredentials(oldVal), "new-value": config.MaskCredentials(val), }).Warn("overriding existing proxy variable with value from configuration") } _ = os.Setenv(name, val) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
I'm probably overthinking this, but we likely want the precedence of these to remain familiar to users, right? CLI flag > environment variable > config file? I'm not sure what the best way to accomplish that would be (nor if it's even worth doing so). (If we were able to resolve that disparity somehow, we could probably remove the `getConfigOrEnv` function. :grimacing:)
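As an illustration of the precedence the reviewer is describing (CLI flag > environment variable > config file), a resolver could look like the sketch below. This is a hypothetical example, not the `getConfigOrEnv` helper mentioned in the comment, and the names (`resolveSetting`, `flagValue`, `fileValue`) are made up for illustration; per the PR description, the merged change actually lets the config file and flags take precedence over the environment.

```go
// Hypothetical sketch: resolve one setting with the familiar precedence
// order of command-line flag, then environment variable, then config file.
package main

import (
	"fmt"
	"os"
)

// resolveSetting returns the first non-empty value in precedence order.
// flagValue comes from the parsed CLI flags, envName names the environment
// variable to consult, and fileValue comes from the configuration file.
func resolveSetting(flagValue, envName, fileValue string) string {
	if flagValue != "" {
		return flagValue
	}
	if v := os.Getenv(envName); v != "" {
		return v
	}
	return fileValue
}

func main() {
	os.Setenv("HTTP_PROXY", "http://env-proxy.example.com:80")
	// No flag is set, so the environment variable wins over the config file value.
	fmt.Println(resolveSetting("", "HTTP_PROXY", "http://file-proxy.example.com:80"))
}
```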
tianon
4,455
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448

The new format uses a `"proxies"` key that holds the proxies;

```json
{
  "proxies": {
    "http-proxy": "http-config",
    "https-proxy": "https-config",
    "no-proxy": "no-proxy-config"
  }
}
```

--------

carry of https://github.com/moby/moby/pull/42647

fixes https://github.com/moby/moby/issues/24758
closes https://github.com/moby/moby/pull/42647
addresses https://github.com/moby/moby/issues/40201

This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables).

Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use.

The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon.

Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration).

With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`)

```
cat /etc/docker/daemon.json
{
  "http-proxy": "http://proxytest.example.com:80",
  "https-proxy": "https://proxytest.example.com:443"
}

docker pull busybox
Using default tag: latest
Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

docker build .
Sending build context to Docker daemon 89.28MB
Step 1/3 : FROM golang:1.16-alpine AS base
Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host
```

Integration tests were added to test the behavior:

- verify that the configuration through all means are used (env-var, command-line flags, daemon.json), and used in the expected order of preference.
- verify that conflicting options produce an error.
- verify that logs and error messages sanitise proxy URLs (as they may contain username / password)

```bash
make BIND_DIR=. DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration
Running integration-test (iteration 1)
Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy
=== RUN   TestDaemonProxy
=== RUN   TestDaemonProxy/environment_variables
=== RUN   TestDaemonProxy/command-line_options
=== RUN   TestDaemonProxy/configuration_file
=== RUN   TestDaemonProxy/conflicting_options
=== RUN   TestDaemonProxy/reload_sanitized
--- PASS: TestDaemonProxy (6.75s)
--- PASS: TestDaemonProxy/environment_variables (1.84s)
--- PASS: TestDaemonProxy/command-line_options (1.84s)
--- PASS: TestDaemonProxy/configuration_file (1.93s)
--- PASS: TestDaemonProxy/conflicting_options (0.52s)
--- PASS: TestDaemonProxy/reload_sanitized (0.63s)
PASS
DONE 6 tests in 6.942s
```

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

```markdown
- Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables.
```

**- A picture of a cute animal (not mandatory but encouraged)**
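For the "conflicting options produce an error" behaviour described above, a minimal sketch of the kind of check involved might look like this; `mergeProxySetting` and its parameters are hypothetical names chosen for illustration, not the PR's actual validation code.

```go
// Hypothetical sketch: reject the case where the same proxy option is set
// both on the dockerd command line and in daemon.json; otherwise pick
// whichever source provided a value.
package main

import "fmt"

func mergeProxySetting(name, flagValue, fileValue string) (string, error) {
	switch {
	case flagValue != "" && fileValue != "":
		return "", fmt.Errorf("conflicting values for %s: %q (flag) and %q (daemon.json)", name, flagValue, fileValue)
	case flagValue != "":
		return flagValue, nil
	default:
		return fileValue, nil
	}
}

func main() {
	// Only the flag is set: the flag value is used.
	if v, err := mergeProxySetting("http-proxy", "http://proxy.example.com:80", ""); err == nil {
		fmt.Println("using", v)
	}
	// Both sources are set: the daemon would refuse to start.
	if _, err := mergeProxySetting("http-proxy", "http://a.example.com:80", "http://b.example.com:80"); err != nil {
		fmt.Println("error:", err)
	}
}
```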
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } configureProxyEnv(cli.Config) warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil } func configureProxyEnv(conf *config.Config) { if p := conf.HTTPProxy; p != "" { overrideProxyEnv("HTTP_PROXY", p) overrideProxyEnv("http_proxy", p) } if p := conf.HTTPSProxy; p != "" { overrideProxyEnv("HTTPS_PROXY", p) overrideProxyEnv("https_proxy", p) } if p := conf.NoProxy; p != "" { overrideProxyEnv("NO_PROXY", p) overrideProxyEnv("no_proxy", p) } } func overrideProxyEnv(name, val 
string) { if oldVal := os.Getenv(name); oldVal != "" && oldVal != val { logrus.WithFields(logrus.Fields{ "name": name, "old-value": config.MaskCredentials(oldVal), "new-value": config.MaskCredentials(val), }).Warn("overriding existing proxy variable with value from configuration") } _ = os.Setenv(name, val) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Hmm.. good point. I recall we had discussions around that when `daemon.json` was implemented. Afaik, we currently treat `--some-option` and `configfile.some-option` to be at the same level (which is why the daemon produces a "conflicting options" error if both are set). IIRC, the motivation at the time was that `--some-option` would be usually set in a config-file as well (e.g. a systemd unit), and therefore to be at the same level of priority. I _thought_ we made them both `> env-var`, but looks like for, e.g.`DOCKER_DRIVER`, we make the env-var take precedence, i.e. the following will use `overlay2`, not `vfs`; ```bash DOCKER_DRIVER=overlay2 dockerd --debug --storage-driver=vfs ``` I thought it would make sense to make the config (or --flag) > env-var to allow overriding any default proxy that's set in the environment, but perhaps that was the wrong idea 🤔
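To make the precedence question in the comment above concrete, here is a minimal standalone Go sketch of the behaviour being discussed — a value coming from a flag or config file overriding an env-var that is already set, with a warning when the two differ. The function and variable names are illustrative only, not the daemon's actual API:

```go
package main

import (
	"fmt"
	"os"
)

// overrideEnv applies a value coming from the daemon configuration (a flag or
// daemon.json entry) on top of an environment variable, warning when it
// replaces a different value that was already present in the environment.
// Names here are illustrative, not the daemon's real helpers.
func overrideEnv(name, configured string) {
	if configured == "" {
		return // nothing configured: keep whatever the environment provides
	}
	if old := os.Getenv(name); old != "" && old != configured {
		fmt.Printf("warning: overriding %s=%q with configured value %q\n", name, old, configured)
	}
	_ = os.Setenv(name, configured)
}

func main() {
	// Simulate an env-var already set in the daemon's environment plus a
	// conflicting configured value; the configured value wins.
	os.Setenv("HTTP_PROXY", "http://env-proxy.example.com:3128")
	overrideEnv("HTTP_PROXY", "http://config-proxy.example.com:3128")
	fmt.Println("effective HTTP_PROXY:", os.Getenv("HTTP_PROXY"))
}
```

Mutating the process environment (rather than plumbing the value through every HTTP client) is one way to make a configured proxy take effect for Go's `http.ProxyFromEnvironment`, which reads the variables only once — which is also why such a setting is not live-reloadable.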
thaJeztah
4456
moby/moby
42835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=. 
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
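The last bullet in the description above (sanitising proxy URLs before they reach logs or error messages) is what the `config.MaskCredentials` calls in the accompanying code are used for. As a rough, hedged sketch of what such masking can look like — not the daemon's actual implementation — the password portion of a URL's userinfo can be replaced before logging:

```go
package main

import (
	"fmt"
	"net/url"
)

// maskProxyURL replaces the password portion of a proxy URL with "xxxxx" so
// the URL can be logged without leaking credentials. Illustrative sketch only.
func maskProxyURL(raw string) string {
	u, err := url.Parse(raw)
	if err != nil || u.User == nil {
		return raw // not a parseable URL or no userinfo: nothing to mask
	}
	if _, ok := u.User.Password(); ok {
		u.User = url.UserPassword(u.User.Username(), "xxxxx")
	}
	return u.String()
}

func main() {
	fmt.Println(maskProxyURL("http://user:s3cret@proxytest.example.com:80"))
	// Output: http://user:xxxxx@proxytest.example.com:80
}
```

The standard library's `(*url.URL).Redacted()` (Go 1.15+) provides similar behaviour out of the box.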
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } configureProxyEnv(cli.Config) warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil } func configureProxyEnv(conf *config.Config) { if p := conf.HTTPProxy; p != "" { overrideProxyEnv("HTTP_PROXY", p) overrideProxyEnv("http_proxy", p) } if p := conf.HTTPSProxy; p != "" { overrideProxyEnv("HTTPS_PROXY", p) overrideProxyEnv("https_proxy", p) } if p := conf.NoProxy; p != "" { overrideProxyEnv("NO_PROXY", p) overrideProxyEnv("no_proxy", p) } } func overrideProxyEnv(name, val 
string) { if oldVal := os.Getenv(name); oldVal != "" && oldVal != val { logrus.WithFields(logrus.Fields{ "name": name, "old-value": config.MaskCredentials(oldVal), "new-value": config.MaskCredentials(val), }).Warn("overriding existing proxy variable with value from configuration") } _ = os.Setenv(name, val) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
^^ discussing the above in the maintainers meeting, and we'll keep the current implementation (allowing both `--flag` and `daemon.json` to override the "system" configuration), but if we find that a proxy env-var is present, we'll print a warning in the daemon logs.
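The warning behaviour described in this comment corresponds to the `overrideProxyEnv` helper that appears in the changed file contents in this record. Below is a minimal, standalone Go sketch of that override-and-warn pattern, simplified to the standard library (the real helper uses logrus and masks credentials; the example proxy values are illustrative only).

```go
package main

import (
	"log"
	"os"
)

// overrideProxyEnv replaces an existing proxy environment variable with the
// value taken from the daemon configuration, logging a warning when a
// different value was already present (the behaviour discussed above).
func overrideProxyEnv(name, val string) {
	if oldVal := os.Getenv(name); oldVal != "" && oldVal != val {
		log.Printf("warning: overriding existing proxy variable %s with value from configuration", name)
	}
	_ = os.Setenv(name, val)
}

func main() {
	// Simulate a proxy configured via the environment ("system" configuration).
	os.Setenv("HTTP_PROXY", "http://env-proxy.example.com:80")
	// The daemon.json / --flag value wins, and a warning is printed.
	overrideProxyEnv("HTTP_PROXY", "http://config-proxy.example.com:80")
}
```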
thaJeztah
4,457
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means (env-var, command-line flags, daemon.json) is used, and in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
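The "error when both a command-line flag and a daemon.json option are set" rule described above is enforced by the daemon's configuration validation; the exact code is not shown in this record, so the following is only a hypothetical Go sketch of such a check, with the key names and error message chosen for illustration.

```go
package main

import "fmt"

// checkProxyConflict is a hypothetical illustration of rejecting proxy
// settings that are specified both on the command line and in the
// configuration file; it is not the daemon's actual validation code.
func checkProxyConflict(setOnCommandLine map[string]bool, fileConfig map[string]string) error {
	var conflicts []string
	for _, key := range []string{"http-proxy", "https-proxy", "no-proxy"} {
		if setOnCommandLine[key] && fileConfig[key] != "" {
			conflicts = append(conflicts, key)
		}
	}
	if len(conflicts) > 0 {
		return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %v", conflicts)
	}
	return nil
}

func main() {
	err := checkProxyConflict(
		map[string]bool{"http-proxy": true},
		map[string]string{"http-proxy": "http://proxytest.example.com:80"},
	)
	fmt.Println(err) // conflict reported, mirroring the start-up error described above
}
```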
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } configureProxyEnv(cli.Config) warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil } func configureProxyEnv(conf *config.Config) { if p := conf.HTTPProxy; p != "" { overrideProxyEnv("HTTP_PROXY", p) overrideProxyEnv("http_proxy", p) } if p := conf.HTTPSProxy; p != "" { overrideProxyEnv("HTTPS_PROXY", p) overrideProxyEnv("https_proxy", p) } if p := conf.NoProxy; p != "" { overrideProxyEnv("NO_PROXY", p) overrideProxyEnv("no_proxy", p) } } func overrideProxyEnv(name, val 
string) { if oldVal := os.Getenv(name); oldVal != "" && oldVal != val { logrus.WithFields(logrus.Fields{ "name": name, "old-value": config.MaskCredentials(oldVal), "new-value": config.MaskCredentials(val), }).Warn("overriding existing proxy variable with value from configuration") } _ = os.Setenv(name, val) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
I guess if you want to avoid warnings on reload, this should also allow `oldVal == val`, right? ```suggestion if oldVal := os.Getenv(name); oldVal != "" && oldVal != val { ```
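For illustration only (not part of the review thread): a minimal, standalone sketch of the behaviour the suggestion describes. The daemon's logrus warning and credential masking are replaced here with a plain `fmt.Printf`, and the proxy URLs are placeholders.

```go
package main

import (
	"fmt"
	"os"
)

// overrideProxyEnv mirrors the suggested condition: warn only when an
// existing, different value would actually be replaced.
func overrideProxyEnv(name, val string) {
	if oldVal := os.Getenv(name); oldVal != "" && oldVal != val {
		fmt.Printf("warning: overriding %s=%q with %q\n", name, oldVal, val)
	}
	_ = os.Setenv(name, val)
}

func main() {
	_ = os.Setenv("HTTP_PROXY", "http://proxy.example.com:80")

	// Re-applying the same configured value (e.g. on reload) stays silent.
	overrideProxyEnv("HTTP_PROXY", "http://proxy.example.com:80")

	// A genuinely different value still triggers the warning.
	overrideProxyEnv("HTTP_PROXY", "http://other.example.com:80")
}
```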
tianon
4,458
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
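As an aside on the "not live reloadable" point above: Go's `net/http` reads the proxy environment variables only once per process (guarded internally by a `sync.Once`), which is why the daemon has to be restarted for new proxy settings to take effect. A small standalone sketch, unrelated to the moby code itself; the hostnames are placeholders.

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	_ = os.Setenv("HTTP_PROXY", "http://first.example.com:3128")

	req, _ := http.NewRequest(http.MethodGet, "http://registry.example.com/", nil)

	// The first lookup caches the proxy configuration for this process.
	u, _ := http.ProxyFromEnvironment(req)
	fmt.Println(u) // http://first.example.com:3128

	// Changing the environment afterwards is ignored by this process.
	_ = os.Setenv("HTTP_PROXY", "http://second.example.com:3128")
	u, _ = http.ProxyFromEnvironment(req)
	fmt.Println(u) // still http://first.example.com:3128
}
```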
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } configureProxyEnv(cli.Config) warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil } func configureProxyEnv(conf *config.Config) { if p := conf.HTTPProxy; p != "" { overrideProxyEnv("HTTP_PROXY", p) overrideProxyEnv("http_proxy", p) } if p := conf.HTTPSProxy; p != "" { overrideProxyEnv("HTTPS_PROXY", p) overrideProxyEnv("https_proxy", p) } if p := conf.NoProxy; p != "" { overrideProxyEnv("NO_PROXY", p) overrideProxyEnv("no_proxy", p) } } func overrideProxyEnv(name, val 
string) { if oldVal := os.Getenv(name); oldVal != "" && oldVal != val { logrus.WithFields(logrus.Fields{ "name": name, "old-value": config.MaskCredentials(oldVal), "new-value": config.MaskCredentials(val), }).Warn("overriding existing proxy variable with value from configuration") } _ = os.Setenv(name, val) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Wah, just noticed I missed your comment here; let me have a look at that as well
thaJeztah
4,459
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
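To make the "not live reloadable" point above concrete, here is a minimal, standalone Go sketch (not dockerd code; the proxy and registry host names are made up for illustration). It shows that `net/http` caches the `*_PROXY` environment variables on first use, internally behind a `sync.Once`, so values changed later in the same process are ignored until a restart — which is why the daemon has to apply its proxy configuration to the environment early during startup and why a reload/SIGHUP cannot pick up new proxy settings:

```go
// Standalone sketch, not part of dockerd: net/http reads the proxy
// environment variables only once per process (guarded by a sync.Once),
// so later changes to HTTP_PROXY have no effect in the same process.
// The proxy and registry host names below are made up.
package main

import (
	"fmt"
	"net/http"
	"os"
)

func proxyFor(target string) string {
	req, err := http.NewRequest(http.MethodGet, target, nil)
	if err != nil {
		panic(err)
	}
	proxyURL, err := http.ProxyFromEnvironment(req)
	if err != nil || proxyURL == nil {
		return "<direct>"
	}
	return proxyURL.String()
}

func main() {
	os.Setenv("HTTP_PROXY", "http://proxy-a.example.com:3128")
	fmt.Println(proxyFor("http://registry.example.com/v2/")) // http://proxy-a.example.com:3128

	// Changing the variable afterwards is ignored: the first call above
	// already cached the proxy configuration for this process.
	os.Setenv("HTTP_PROXY", "http://proxy-b.example.com:3128")
	fmt.Println(proxyFor("http://registry.example.com/v2/")) // still proxy-a
}
```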
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } configureProxyEnv(cli.Config) warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil } func configureProxyEnv(conf *config.Config) { if p := conf.HTTPProxy; p != "" { overrideProxyEnv("HTTP_PROXY", p) overrideProxyEnv("http_proxy", p) } if p := conf.HTTPSProxy; p != "" { overrideProxyEnv("HTTPS_PROXY", p) overrideProxyEnv("https_proxy", p) } if p := conf.NoProxy; p != "" { overrideProxyEnv("NO_PROXY", p) overrideProxyEnv("no_proxy", p) } } func overrideProxyEnv(name, val 
string) { if oldVal := os.Getenv(name); oldVal != "" && oldVal != val { logrus.WithFields(logrus.Fields{ "name": name, "old-value": config.MaskCredentials(oldVal), "new-value": config.MaskCredentials(val), }).Warn("overriding existing proxy variable with value from configuration") } _ = os.Setenv(name, val) }
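The `overrideProxyEnv` helper above logs both the old and the new value through `config.MaskCredentials`, so proxy credentials never end up verbatim in the daemon log. As a rough, hypothetical sketch of what such masking amounts to — this is an approximation using only the standard library, not the daemon's actual `MaskCredentials` implementation, and the placeholder string is an assumption:

```go
// Hypothetical approximation of credential masking for logged proxy URLs.
// Not the real config.MaskCredentials; behaviour and placeholder are assumed.
package main

import (
	"fmt"
	"net/url"
)

func maskCredentials(rawURL string) string {
	parsed, err := url.Parse(rawURL)
	if err != nil || parsed.User == nil {
		// Nothing to mask, or not a parseable URL: return the input unchanged.
		return rawURL
	}
	// Replace both user name and password so neither can leak into the logs.
	parsed.User = url.UserPassword("xxxxx", "xxxxx")
	return parsed.String()
}

func main() {
	fmt.Println(maskCredentials("https://user:s3cret@proxytest.example.com:443"))
	// Output: https://xxxxx:xxxxx@proxytest.example.com:443
}
```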
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Added your suggestion
thaJeztah
4,460
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448

The new format uses a `"proxies"` key that holds the proxies;

```json
{
  "proxies": {
    "http-proxy": "http-config",
    "https-proxy": "https-config",
    "no-proxy": "no-proxy-config"
  }
}
```

--------

carry of https://github.com/moby/moby/pull/42647

fixes https://github.com/moby/moby/issues/24758
closes https://github.com/moby/moby/pull/42647
addresses https://github.com/moby/moby/issues/40201

This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables).

Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use.

The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both a command-line flag and a daemon.json configuration option are set, an error is produced when starting the daemon.

Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration).

With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`)

    cat /etc/docker/daemon.json
    {
      "http-proxy": "http://proxytest.example.com:80",
      "https-proxy": "https://proxytest.example.com:443"
    }

    docker pull busybox
    Using default tag: latest
    Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

    docker build .
    Sending build context to Docker daemon 89.28MB
    Step 1/3 : FROM golang:1.16-alpine AS base
    Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

Integration tests were added to test the behavior:

- verify that the configuration set through all means (env-var, command-line flags, daemon.json) is used, and in the expected order of preference.
- verify that conflicting options produce an error.
- verify that logs and error messages sanitise proxy URLs (as they may contain username / password)

```bash
make BIND_DIR=. DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration
Running integration-test (iteration 1)
Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy
=== RUN TestDaemonProxy
=== RUN TestDaemonProxy/environment_variables
=== RUN TestDaemonProxy/command-line_options
=== RUN TestDaemonProxy/configuration_file
=== RUN TestDaemonProxy/conflicting_options
=== RUN TestDaemonProxy/reload_sanitized
--- PASS: TestDaemonProxy (6.75s)
--- PASS: TestDaemonProxy/environment_variables (1.84s)
--- PASS: TestDaemonProxy/command-line_options (1.84s)
--- PASS: TestDaemonProxy/configuration_file (1.93s)
--- PASS: TestDaemonProxy/conflicting_options (0.52s)
--- PASS: TestDaemonProxy/reload_sanitized (0.63s)
PASS
DONE 6 tests in 6.942s
```

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

```markdown
- Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables.
```

**- A picture of a cute animal (not mandatory but encouraged)**
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
daemon/reload.go
package daemon // import "github.com/docker/docker/daemon" import ( "encoding/json" "fmt" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/discovery" "github.com/sirupsen/logrus" ) // Reload reads configuration changes and modifies the // daemon according to those changes. // These are the settings that Reload changes: // - Platform runtime // - Daemon debug log level // - Daemon max concurrent downloads // - Daemon max concurrent uploads // - Daemon max download attempts // - Daemon shutdown timeout (in seconds) // - Cluster discovery (reconfigure and restart) // - Daemon labels // - Insecure registries // - Registry mirrors // - Daemon live restore func (daemon *Daemon) Reload(conf *config.Config) (err error) { daemon.configStore.Lock() attributes := map[string]string{} defer func() { jsonString, _ := json.Marshal(daemon.configStore) // we're unlocking here, because // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() // holds that lock too. daemon.configStore.Unlock() if err == nil { logrus.Infof("Reloaded configuration: %s", jsonString) daemon.LogDaemonEventWithAttributes("reload", attributes) } }() if err := daemon.reloadPlatform(conf, attributes); err != nil { return err } daemon.reloadDebug(conf, attributes) daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes) if err := daemon.reloadMaxDownloadAttempts(conf, attributes); err != nil { return err } daemon.reloadShutdownTimeout(conf, attributes) daemon.reloadFeatures(conf, attributes) if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil { return err } if err := daemon.reloadLabels(conf, attributes); err != nil { return err } if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil { return err } if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil { return err } if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil { return err } if err := daemon.reloadLiveRestore(conf, attributes); err != nil { return err } return daemon.reloadNetworkDiagnosticPort(conf, attributes) } // reloadDebug updates configuration with Debug option // and updates the passed attributes func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) { // update corresponding configuration if conf.IsValueSet("debug") { daemon.configStore.Debug = conf.Debug } // prepare reload event attributes with updatable configurations attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) } // reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent // download and upload options and updates the passed attributes func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) { // If no value is set for max-concurrent-downloads we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. maxConcurrentDownloads := config.DefaultMaxConcurrentDownloads if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil { maxConcurrentDownloads = *conf.MaxConcurrentDownloads } daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) // If no value is set for max-concurrent-upload we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. 
maxConcurrentUploads := config.DefaultMaxConcurrentUploads if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != nil { maxConcurrentUploads = *conf.MaxConcurrentUploads } daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) if daemon.imageService != nil { daemon.imageService.UpdateConfig(&maxConcurrentDownloads, &maxConcurrentUploads) } // prepare reload event attributes with updatable configurations attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) // prepare reload event attributes with updatable configurations attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) } // reloadMaxDownloadAttempts updates configuration with max concurrent // download attempts when a connection is lost and updates the passed attributes func (daemon *Daemon) reloadMaxDownloadAttempts(conf *config.Config, attributes map[string]string) error { if err := config.ValidateMaxDownloadAttempts(conf); err != nil { return err } // If no value is set for max-download-attempts we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. maxDownloadAttempts := config.DefaultDownloadAttempts if conf.IsValueSet("max-download-attempts") && conf.MaxDownloadAttempts != nil { maxDownloadAttempts = *conf.MaxDownloadAttempts } daemon.configStore.MaxDownloadAttempts = &maxDownloadAttempts logrus.Debugf("Reset Max Download Attempts: %d", *daemon.configStore.MaxDownloadAttempts) // prepare reload event attributes with updatable configurations attributes["max-download-attempts"] = fmt.Sprintf("%d", *daemon.configStore.MaxDownloadAttempts) return nil } // reloadShutdownTimeout updates configuration with daemon shutdown timeout option // and updates the passed attributes func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) { // update corresponding configuration if conf.IsValueSet("shutdown-timeout") { daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) } // prepare reload event attributes with updatable configurations attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) } // reloadClusterDiscovery updates configuration with cluster discovery options // and updates the passed attributes func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) { defer func() { // prepare reload event attributes with updatable configurations attributes["cluster-store"] = conf.ClusterStore attributes["cluster-advertise"] = conf.ClusterAdvertise attributes["cluster-store-opts"] = "{}" if daemon.configStore.ClusterOpts != nil { opts, err2 := json.Marshal(conf.ClusterOpts) if err != nil { err = err2 } attributes["cluster-store-opts"] = string(opts) } }() newAdvertise := conf.ClusterAdvertise newClusterStore := daemon.configStore.ClusterStore if conf.IsValueSet("cluster-advertise") { if conf.IsValueSet("cluster-store") { newClusterStore = conf.ClusterStore } newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise) if err != nil && err != discovery.ErrDiscoveryDisabled { return err } } if daemon.clusterProvider != nil { if err := conf.IsSwarmCompatible(); err != nil { return err } } // check discovery modifications if 
!config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) { return nil } // enable discovery for the first time if it was not previously enabled if daemon.discoveryWatcher == nil { discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts) if err != nil { return fmt.Errorf("failed to initialize discovery: %v", err) } daemon.discoveryWatcher = discoveryWatcher } else if err == discovery.ErrDiscoveryDisabled { // disable discovery if it was previously enabled and it's disabled now daemon.discoveryWatcher.Stop() } else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil { // reload discovery return err } daemon.configStore.ClusterStore = newClusterStore daemon.configStore.ClusterOpts = conf.ClusterOpts daemon.configStore.ClusterAdvertise = newAdvertise if daemon.netController == nil { return nil } netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) if err != nil { logrus.WithError(err).Warn("failed to get options with network controller") return nil } err = daemon.netController.ReloadConfiguration(netOptions...) if err != nil { logrus.Warnf("Failed to reload configuration with network controller: %v", err) } return nil } // reloadLabels updates configuration with engine labels // and updates the passed attributes func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("labels") { daemon.configStore.Labels = conf.Labels } // prepare reload event attributes with updatable configurations if daemon.configStore.Labels != nil { labels, err := json.Marshal(daemon.configStore.Labels) if err != nil { return err } attributes["labels"] = string(labels) } else { attributes["labels"] = "[]" } return nil } // reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options // and updates the passed attributes. func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error { // Update corresponding configuration. if conf.IsValueSet("allow-nondistributable-artifacts") { daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil { return err } } // Prepare reload event attributes with updatable configurations. 
if daemon.configStore.AllowNondistributableArtifacts != nil { v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts) if err != nil { return err } attributes["allow-nondistributable-artifacts"] = string(v) } else { attributes["allow-nondistributable-artifacts"] = "[]" } return nil } // reloadInsecureRegistries updates configuration with insecure registry option // and updates the passed attributes func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("insecure-registries") { daemon.configStore.InsecureRegistries = conf.InsecureRegistries if err := daemon.RegistryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil { return err } } // prepare reload event attributes with updatable configurations if daemon.configStore.InsecureRegistries != nil { insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) if err != nil { return err } attributes["insecure-registries"] = string(insecureRegistries) } else { attributes["insecure-registries"] = "[]" } return nil } // reloadRegistryMirrors updates configuration with registry mirror options // and updates the passed attributes func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("registry-mirrors") { daemon.configStore.Mirrors = conf.Mirrors if err := daemon.RegistryService.LoadMirrors(conf.Mirrors); err != nil { return err } } // prepare reload event attributes with updatable configurations if daemon.configStore.Mirrors != nil { mirrors, err := json.Marshal(daemon.configStore.Mirrors) if err != nil { return err } attributes["registry-mirrors"] = string(mirrors) } else { attributes["registry-mirrors"] = "[]" } return nil } // reloadLiveRestore updates configuration with live restore option // and updates the passed attributes func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("live-restore") { daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled } // prepare reload event attributes with updatable configurations attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) return nil } // reloadNetworkDiagnosticPort updates the network controller starting the diagnostic if the config is valid func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error { if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") || conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 { // If there is no config make sure that the diagnostic is off if daemon.netController != nil { daemon.netController.StopDiagnostic() } return nil } // Enable the network diagnostic if the flag is set with a valid port within the range logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server") daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort) return nil } // reloadFeatures updates configuration with enabled/disabled features func (daemon *Daemon) reloadFeatures(conf *config.Config, attributes map[string]string) { // update corresponding configuration // note that we allow features option to be entirely unset daemon.configStore.Features = conf.Features // prepare reload event attributes with updatable 
configurations attributes["features"] = fmt.Sprintf("%v", daemon.configStore.Features) }
package daemon // import "github.com/docker/docker/daemon" import ( "encoding/json" "fmt" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/discovery" "github.com/sirupsen/logrus" ) // Reload reads configuration changes and modifies the // daemon according to those changes. // These are the settings that Reload changes: // - Platform runtime // - Daemon debug log level // - Daemon max concurrent downloads // - Daemon max concurrent uploads // - Daemon max download attempts // - Daemon shutdown timeout (in seconds) // - Cluster discovery (reconfigure and restart) // - Daemon labels // - Insecure registries // - Registry mirrors // - Daemon live restore func (daemon *Daemon) Reload(conf *config.Config) (err error) { daemon.configStore.Lock() attributes := map[string]string{} defer func() { if err == nil { jsonString, _ := json.Marshal(&struct { *config.Config config.ProxyConfig }{ Config: daemon.configStore, ProxyConfig: config.ProxyConfig{ HTTPProxy: config.MaskCredentials(daemon.configStore.HTTPProxy), HTTPSProxy: config.MaskCredentials(daemon.configStore.HTTPSProxy), NoProxy: config.MaskCredentials(daemon.configStore.NoProxy), }, }) logrus.Infof("Reloaded configuration: %s", jsonString) } // we're unlocking here, because // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() // holds that lock too. daemon.configStore.Unlock() if err == nil { daemon.LogDaemonEventWithAttributes("reload", attributes) } }() if err := daemon.reloadPlatform(conf, attributes); err != nil { return err } daemon.reloadDebug(conf, attributes) daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes) if err := daemon.reloadMaxDownloadAttempts(conf, attributes); err != nil { return err } daemon.reloadShutdownTimeout(conf, attributes) daemon.reloadFeatures(conf, attributes) if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil { return err } if err := daemon.reloadLabels(conf, attributes); err != nil { return err } if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil { return err } if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil { return err } if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil { return err } if err := daemon.reloadLiveRestore(conf, attributes); err != nil { return err } return daemon.reloadNetworkDiagnosticPort(conf, attributes) } // reloadDebug updates configuration with Debug option // and updates the passed attributes func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) { // update corresponding configuration if conf.IsValueSet("debug") { daemon.configStore.Debug = conf.Debug } // prepare reload event attributes with updatable configurations attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) } // reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent // download and upload options and updates the passed attributes func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) { // If no value is set for max-concurrent-downloads we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. 
maxConcurrentDownloads := config.DefaultMaxConcurrentDownloads if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil { maxConcurrentDownloads = *conf.MaxConcurrentDownloads } daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) // If no value is set for max-concurrent-upload we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. maxConcurrentUploads := config.DefaultMaxConcurrentUploads if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != nil { maxConcurrentUploads = *conf.MaxConcurrentUploads } daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) if daemon.imageService != nil { daemon.imageService.UpdateConfig(&maxConcurrentDownloads, &maxConcurrentUploads) } // prepare reload event attributes with updatable configurations attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) // prepare reload event attributes with updatable configurations attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) } // reloadMaxDownloadAttempts updates configuration with max concurrent // download attempts when a connection is lost and updates the passed attributes func (daemon *Daemon) reloadMaxDownloadAttempts(conf *config.Config, attributes map[string]string) error { if err := config.ValidateMaxDownloadAttempts(conf); err != nil { return err } // If no value is set for max-download-attempts we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. 
maxDownloadAttempts := config.DefaultDownloadAttempts if conf.IsValueSet("max-download-attempts") && conf.MaxDownloadAttempts != nil { maxDownloadAttempts = *conf.MaxDownloadAttempts } daemon.configStore.MaxDownloadAttempts = &maxDownloadAttempts logrus.Debugf("Reset Max Download Attempts: %d", *daemon.configStore.MaxDownloadAttempts) // prepare reload event attributes with updatable configurations attributes["max-download-attempts"] = fmt.Sprintf("%d", *daemon.configStore.MaxDownloadAttempts) return nil } // reloadShutdownTimeout updates configuration with daemon shutdown timeout option // and updates the passed attributes func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) { // update corresponding configuration if conf.IsValueSet("shutdown-timeout") { daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) } // prepare reload event attributes with updatable configurations attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) } // reloadClusterDiscovery updates configuration with cluster discovery options // and updates the passed attributes func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) { defer func() { // prepare reload event attributes with updatable configurations attributes["cluster-store"] = conf.ClusterStore attributes["cluster-advertise"] = conf.ClusterAdvertise attributes["cluster-store-opts"] = "{}" if daemon.configStore.ClusterOpts != nil { opts, err2 := json.Marshal(conf.ClusterOpts) if err != nil { err = err2 } attributes["cluster-store-opts"] = string(opts) } }() newAdvertise := conf.ClusterAdvertise newClusterStore := daemon.configStore.ClusterStore if conf.IsValueSet("cluster-advertise") { if conf.IsValueSet("cluster-store") { newClusterStore = conf.ClusterStore } newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise) if err != nil && err != discovery.ErrDiscoveryDisabled { return err } } if daemon.clusterProvider != nil { if err := conf.IsSwarmCompatible(); err != nil { return err } } // check discovery modifications if !config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) { return nil } // enable discovery for the first time if it was not previously enabled if daemon.discoveryWatcher == nil { discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts) if err != nil { return fmt.Errorf("failed to initialize discovery: %v", err) } daemon.discoveryWatcher = discoveryWatcher } else if err == discovery.ErrDiscoveryDisabled { // disable discovery if it was previously enabled and it's disabled now daemon.discoveryWatcher.Stop() } else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil { // reload discovery return err } daemon.configStore.ClusterStore = newClusterStore daemon.configStore.ClusterOpts = conf.ClusterOpts daemon.configStore.ClusterAdvertise = newAdvertise if daemon.netController == nil { return nil } netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) if err != nil { logrus.WithError(err).Warn("failed to get options with network controller") return nil } err = daemon.netController.ReloadConfiguration(netOptions...) 
if err != nil { logrus.Warnf("Failed to reload configuration with network controller: %v", err) } return nil } // reloadLabels updates configuration with engine labels // and updates the passed attributes func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("labels") { daemon.configStore.Labels = conf.Labels } // prepare reload event attributes with updatable configurations if daemon.configStore.Labels != nil { labels, err := json.Marshal(daemon.configStore.Labels) if err != nil { return err } attributes["labels"] = string(labels) } else { attributes["labels"] = "[]" } return nil } // reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options // and updates the passed attributes. func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error { // Update corresponding configuration. if conf.IsValueSet("allow-nondistributable-artifacts") { daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil { return err } } // Prepare reload event attributes with updatable configurations. if daemon.configStore.AllowNondistributableArtifacts != nil { v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts) if err != nil { return err } attributes["allow-nondistributable-artifacts"] = string(v) } else { attributes["allow-nondistributable-artifacts"] = "[]" } return nil } // reloadInsecureRegistries updates configuration with insecure registry option // and updates the passed attributes func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("insecure-registries") { daemon.configStore.InsecureRegistries = conf.InsecureRegistries if err := daemon.RegistryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil { return err } } // prepare reload event attributes with updatable configurations if daemon.configStore.InsecureRegistries != nil { insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) if err != nil { return err } attributes["insecure-registries"] = string(insecureRegistries) } else { attributes["insecure-registries"] = "[]" } return nil } // reloadRegistryMirrors updates configuration with registry mirror options // and updates the passed attributes func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("registry-mirrors") { daemon.configStore.Mirrors = conf.Mirrors if err := daemon.RegistryService.LoadMirrors(conf.Mirrors); err != nil { return err } } // prepare reload event attributes with updatable configurations if daemon.configStore.Mirrors != nil { mirrors, err := json.Marshal(daemon.configStore.Mirrors) if err != nil { return err } attributes["registry-mirrors"] = string(mirrors) } else { attributes["registry-mirrors"] = "[]" } return nil } // reloadLiveRestore updates configuration with live restore option // and updates the passed attributes func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("live-restore") { daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled } // prepare 
reload event attributes with updatable configurations attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) return nil } // reloadNetworkDiagnosticPort updates the network controller starting the diagnostic if the config is valid func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error { if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") || conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 { // If there is no config make sure that the diagnostic is off if daemon.netController != nil { daemon.netController.StopDiagnostic() } return nil } // Enable the network diagnostic if the flag is set with a valid port within the range logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server") daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort) return nil } // reloadFeatures updates configuration with enabled/disabled features func (daemon *Daemon) reloadFeatures(conf *config.Config, attributes map[string]string) { // update corresponding configuration // note that we allow features option to be entirely unset daemon.configStore.Features = conf.Features // prepare reload event attributes with updatable configurations attributes["features"] = fmt.Sprintf("%v", daemon.configStore.Features) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
This wasn't introduced here, but now that it's doing more work than just "convert thing to JSON", wouldn't it make sense to move this down inside the `if` statement below so we don't generate it unless we actually use it?
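The after_content shown above for this record already does roughly what the reviewer asks for. A trimmed sketch of just the deferred closure (credential masking omitted here for brevity; names are taken from the surrounding reload.go, so this is an excerpt-style sketch rather than a standalone program):

```go
// Sketch of the reworked deferred closure in Reload: the config is only
// marshalled when the reload succeeded, and the marshal still happens
// while the configStore lock is held.
defer func() {
	if err == nil {
		jsonString, _ := json.Marshal(daemon.configStore)
		logrus.Infof("Reloaded configuration: %s", jsonString)
	}
	// unlock before LogDaemonEventWithAttributes(), which ends up taking
	// the same lock via SystemInfo() -> GetAllRuntimes()
	daemon.configStore.Unlock()
	if err == nil {
		daemon.LogDaemonEventWithAttributes("reload", attributes)
	}
}()
```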
tianon
4,461
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
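Among the integration tests listed in the description, the one about sanitising proxy URLs is the part most relevant to the `Reload` change in this record. A small, self-contained sketch of that kind of masking is shown below; the helper name `maskCredentials` is hypothetical and is not necessarily how `config.MaskCredentials` is implemented:

```go
package main

import (
	"fmt"
	"net/url"
)

// maskCredentials is a hypothetical stand-in for the masking described in
// the PR: any userinfo embedded in a proxy URL is replaced with a fixed
// placeholder so credentials never end up in daemon logs.
func maskCredentials(rawURL string) string {
	parsed, err := url.Parse(rawURL)
	if err != nil || parsed.User == nil {
		return rawURL // nothing to mask, or not something we can parse
	}
	parsed.User = url.UserPassword("xxxxx", "xxxxx")
	return parsed.String()
}

func main() {
	fmt.Println(maskCredentials("https://user:secret@proxytest.example.com:443"))
	// Output: https://xxxxx:xxxxx@proxytest.example.com:443
}
```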
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
daemon/reload.go
package daemon // import "github.com/docker/docker/daemon" import ( "encoding/json" "fmt" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/discovery" "github.com/sirupsen/logrus" ) // Reload reads configuration changes and modifies the // daemon according to those changes. // These are the settings that Reload changes: // - Platform runtime // - Daemon debug log level // - Daemon max concurrent downloads // - Daemon max concurrent uploads // - Daemon max download attempts // - Daemon shutdown timeout (in seconds) // - Cluster discovery (reconfigure and restart) // - Daemon labels // - Insecure registries // - Registry mirrors // - Daemon live restore func (daemon *Daemon) Reload(conf *config.Config) (err error) { daemon.configStore.Lock() attributes := map[string]string{} defer func() { jsonString, _ := json.Marshal(daemon.configStore) // we're unlocking here, because // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() // holds that lock too. daemon.configStore.Unlock() if err == nil { logrus.Infof("Reloaded configuration: %s", jsonString) daemon.LogDaemonEventWithAttributes("reload", attributes) } }() if err := daemon.reloadPlatform(conf, attributes); err != nil { return err } daemon.reloadDebug(conf, attributes) daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes) if err := daemon.reloadMaxDownloadAttempts(conf, attributes); err != nil { return err } daemon.reloadShutdownTimeout(conf, attributes) daemon.reloadFeatures(conf, attributes) if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil { return err } if err := daemon.reloadLabels(conf, attributes); err != nil { return err } if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil { return err } if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil { return err } if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil { return err } if err := daemon.reloadLiveRestore(conf, attributes); err != nil { return err } return daemon.reloadNetworkDiagnosticPort(conf, attributes) } // reloadDebug updates configuration with Debug option // and updates the passed attributes func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) { // update corresponding configuration if conf.IsValueSet("debug") { daemon.configStore.Debug = conf.Debug } // prepare reload event attributes with updatable configurations attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) } // reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent // download and upload options and updates the passed attributes func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) { // If no value is set for max-concurrent-downloads we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. maxConcurrentDownloads := config.DefaultMaxConcurrentDownloads if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil { maxConcurrentDownloads = *conf.MaxConcurrentDownloads } daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) // If no value is set for max-concurrent-upload we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. 
maxConcurrentUploads := config.DefaultMaxConcurrentUploads if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != nil { maxConcurrentUploads = *conf.MaxConcurrentUploads } daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) if daemon.imageService != nil { daemon.imageService.UpdateConfig(&maxConcurrentDownloads, &maxConcurrentUploads) } // prepare reload event attributes with updatable configurations attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) // prepare reload event attributes with updatable configurations attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) } // reloadMaxDownloadAttempts updates configuration with max concurrent // download attempts when a connection is lost and updates the passed attributes func (daemon *Daemon) reloadMaxDownloadAttempts(conf *config.Config, attributes map[string]string) error { if err := config.ValidateMaxDownloadAttempts(conf); err != nil { return err } // If no value is set for max-download-attempts we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. maxDownloadAttempts := config.DefaultDownloadAttempts if conf.IsValueSet("max-download-attempts") && conf.MaxDownloadAttempts != nil { maxDownloadAttempts = *conf.MaxDownloadAttempts } daemon.configStore.MaxDownloadAttempts = &maxDownloadAttempts logrus.Debugf("Reset Max Download Attempts: %d", *daemon.configStore.MaxDownloadAttempts) // prepare reload event attributes with updatable configurations attributes["max-download-attempts"] = fmt.Sprintf("%d", *daemon.configStore.MaxDownloadAttempts) return nil } // reloadShutdownTimeout updates configuration with daemon shutdown timeout option // and updates the passed attributes func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) { // update corresponding configuration if conf.IsValueSet("shutdown-timeout") { daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) } // prepare reload event attributes with updatable configurations attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) } // reloadClusterDiscovery updates configuration with cluster discovery options // and updates the passed attributes func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) { defer func() { // prepare reload event attributes with updatable configurations attributes["cluster-store"] = conf.ClusterStore attributes["cluster-advertise"] = conf.ClusterAdvertise attributes["cluster-store-opts"] = "{}" if daemon.configStore.ClusterOpts != nil { opts, err2 := json.Marshal(conf.ClusterOpts) if err != nil { err = err2 } attributes["cluster-store-opts"] = string(opts) } }() newAdvertise := conf.ClusterAdvertise newClusterStore := daemon.configStore.ClusterStore if conf.IsValueSet("cluster-advertise") { if conf.IsValueSet("cluster-store") { newClusterStore = conf.ClusterStore } newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise) if err != nil && err != discovery.ErrDiscoveryDisabled { return err } } if daemon.clusterProvider != nil { if err := conf.IsSwarmCompatible(); err != nil { return err } } // check discovery modifications if 
!config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) { return nil } // enable discovery for the first time if it was not previously enabled if daemon.discoveryWatcher == nil { discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts) if err != nil { return fmt.Errorf("failed to initialize discovery: %v", err) } daemon.discoveryWatcher = discoveryWatcher } else if err == discovery.ErrDiscoveryDisabled { // disable discovery if it was previously enabled and it's disabled now daemon.discoveryWatcher.Stop() } else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil { // reload discovery return err } daemon.configStore.ClusterStore = newClusterStore daemon.configStore.ClusterOpts = conf.ClusterOpts daemon.configStore.ClusterAdvertise = newAdvertise if daemon.netController == nil { return nil } netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) if err != nil { logrus.WithError(err).Warn("failed to get options with network controller") return nil } err = daemon.netController.ReloadConfiguration(netOptions...) if err != nil { logrus.Warnf("Failed to reload configuration with network controller: %v", err) } return nil } // reloadLabels updates configuration with engine labels // and updates the passed attributes func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("labels") { daemon.configStore.Labels = conf.Labels } // prepare reload event attributes with updatable configurations if daemon.configStore.Labels != nil { labels, err := json.Marshal(daemon.configStore.Labels) if err != nil { return err } attributes["labels"] = string(labels) } else { attributes["labels"] = "[]" } return nil } // reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options // and updates the passed attributes. func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error { // Update corresponding configuration. if conf.IsValueSet("allow-nondistributable-artifacts") { daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil { return err } } // Prepare reload event attributes with updatable configurations. 
if daemon.configStore.AllowNondistributableArtifacts != nil { v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts) if err != nil { return err } attributes["allow-nondistributable-artifacts"] = string(v) } else { attributes["allow-nondistributable-artifacts"] = "[]" } return nil } // reloadInsecureRegistries updates configuration with insecure registry option // and updates the passed attributes func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("insecure-registries") { daemon.configStore.InsecureRegistries = conf.InsecureRegistries if err := daemon.RegistryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil { return err } } // prepare reload event attributes with updatable configurations if daemon.configStore.InsecureRegistries != nil { insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) if err != nil { return err } attributes["insecure-registries"] = string(insecureRegistries) } else { attributes["insecure-registries"] = "[]" } return nil } // reloadRegistryMirrors updates configuration with registry mirror options // and updates the passed attributes func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("registry-mirrors") { daemon.configStore.Mirrors = conf.Mirrors if err := daemon.RegistryService.LoadMirrors(conf.Mirrors); err != nil { return err } } // prepare reload event attributes with updatable configurations if daemon.configStore.Mirrors != nil { mirrors, err := json.Marshal(daemon.configStore.Mirrors) if err != nil { return err } attributes["registry-mirrors"] = string(mirrors) } else { attributes["registry-mirrors"] = "[]" } return nil } // reloadLiveRestore updates configuration with live restore option // and updates the passed attributes func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("live-restore") { daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled } // prepare reload event attributes with updatable configurations attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) return nil } // reloadNetworkDiagnosticPort updates the network controller starting the diagnostic if the config is valid func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error { if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") || conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 { // If there is no config make sure that the diagnostic is off if daemon.netController != nil { daemon.netController.StopDiagnostic() } return nil } // Enable the network diagnostic if the flag is set with a valid port within the range logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server") daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort) return nil } // reloadFeatures updates configuration with enabled/disabled features func (daemon *Daemon) reloadFeatures(conf *config.Config, attributes map[string]string) { // update corresponding configuration // note that we allow features option to be entirely unset daemon.configStore.Features = conf.Features // prepare reload event attributes with updatable 
configurations attributes["features"] = fmt.Sprintf("%v", daemon.configStore.Features) }
package daemon // import "github.com/docker/docker/daemon" import ( "encoding/json" "fmt" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/discovery" "github.com/sirupsen/logrus" ) // Reload reads configuration changes and modifies the // daemon according to those changes. // These are the settings that Reload changes: // - Platform runtime // - Daemon debug log level // - Daemon max concurrent downloads // - Daemon max concurrent uploads // - Daemon max download attempts // - Daemon shutdown timeout (in seconds) // - Cluster discovery (reconfigure and restart) // - Daemon labels // - Insecure registries // - Registry mirrors // - Daemon live restore func (daemon *Daemon) Reload(conf *config.Config) (err error) { daemon.configStore.Lock() attributes := map[string]string{} defer func() { if err == nil { jsonString, _ := json.Marshal(&struct { *config.Config config.ProxyConfig }{ Config: daemon.configStore, ProxyConfig: config.ProxyConfig{ HTTPProxy: config.MaskCredentials(daemon.configStore.HTTPProxy), HTTPSProxy: config.MaskCredentials(daemon.configStore.HTTPSProxy), NoProxy: config.MaskCredentials(daemon.configStore.NoProxy), }, }) logrus.Infof("Reloaded configuration: %s", jsonString) } // we're unlocking here, because // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() // holds that lock too. daemon.configStore.Unlock() if err == nil { daemon.LogDaemonEventWithAttributes("reload", attributes) } }() if err := daemon.reloadPlatform(conf, attributes); err != nil { return err } daemon.reloadDebug(conf, attributes) daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes) if err := daemon.reloadMaxDownloadAttempts(conf, attributes); err != nil { return err } daemon.reloadShutdownTimeout(conf, attributes) daemon.reloadFeatures(conf, attributes) if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil { return err } if err := daemon.reloadLabels(conf, attributes); err != nil { return err } if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil { return err } if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil { return err } if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil { return err } if err := daemon.reloadLiveRestore(conf, attributes); err != nil { return err } return daemon.reloadNetworkDiagnosticPort(conf, attributes) } // reloadDebug updates configuration with Debug option // and updates the passed attributes func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) { // update corresponding configuration if conf.IsValueSet("debug") { daemon.configStore.Debug = conf.Debug } // prepare reload event attributes with updatable configurations attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) } // reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent // download and upload options and updates the passed attributes func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) { // If no value is set for max-concurrent-downloads we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. 
maxConcurrentDownloads := config.DefaultMaxConcurrentDownloads if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil { maxConcurrentDownloads = *conf.MaxConcurrentDownloads } daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) // If no value is set for max-concurrent-upload we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. maxConcurrentUploads := config.DefaultMaxConcurrentUploads if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != nil { maxConcurrentUploads = *conf.MaxConcurrentUploads } daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) if daemon.imageService != nil { daemon.imageService.UpdateConfig(&maxConcurrentDownloads, &maxConcurrentUploads) } // prepare reload event attributes with updatable configurations attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) // prepare reload event attributes with updatable configurations attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) } // reloadMaxDownloadAttempts updates configuration with max concurrent // download attempts when a connection is lost and updates the passed attributes func (daemon *Daemon) reloadMaxDownloadAttempts(conf *config.Config, attributes map[string]string) error { if err := config.ValidateMaxDownloadAttempts(conf); err != nil { return err } // If no value is set for max-download-attempts we assume it is the default value // We always "reset" as the cost is lightweight and easy to maintain. 
maxDownloadAttempts := config.DefaultDownloadAttempts if conf.IsValueSet("max-download-attempts") && conf.MaxDownloadAttempts != nil { maxDownloadAttempts = *conf.MaxDownloadAttempts } daemon.configStore.MaxDownloadAttempts = &maxDownloadAttempts logrus.Debugf("Reset Max Download Attempts: %d", *daemon.configStore.MaxDownloadAttempts) // prepare reload event attributes with updatable configurations attributes["max-download-attempts"] = fmt.Sprintf("%d", *daemon.configStore.MaxDownloadAttempts) return nil } // reloadShutdownTimeout updates configuration with daemon shutdown timeout option // and updates the passed attributes func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) { // update corresponding configuration if conf.IsValueSet("shutdown-timeout") { daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) } // prepare reload event attributes with updatable configurations attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) } // reloadClusterDiscovery updates configuration with cluster discovery options // and updates the passed attributes func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) { defer func() { // prepare reload event attributes with updatable configurations attributes["cluster-store"] = conf.ClusterStore attributes["cluster-advertise"] = conf.ClusterAdvertise attributes["cluster-store-opts"] = "{}" if daemon.configStore.ClusterOpts != nil { opts, err2 := json.Marshal(conf.ClusterOpts) if err != nil { err = err2 } attributes["cluster-store-opts"] = string(opts) } }() newAdvertise := conf.ClusterAdvertise newClusterStore := daemon.configStore.ClusterStore if conf.IsValueSet("cluster-advertise") { if conf.IsValueSet("cluster-store") { newClusterStore = conf.ClusterStore } newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise) if err != nil && err != discovery.ErrDiscoveryDisabled { return err } } if daemon.clusterProvider != nil { if err := conf.IsSwarmCompatible(); err != nil { return err } } // check discovery modifications if !config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) { return nil } // enable discovery for the first time if it was not previously enabled if daemon.discoveryWatcher == nil { discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts) if err != nil { return fmt.Errorf("failed to initialize discovery: %v", err) } daemon.discoveryWatcher = discoveryWatcher } else if err == discovery.ErrDiscoveryDisabled { // disable discovery if it was previously enabled and it's disabled now daemon.discoveryWatcher.Stop() } else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil { // reload discovery return err } daemon.configStore.ClusterStore = newClusterStore daemon.configStore.ClusterOpts = conf.ClusterOpts daemon.configStore.ClusterAdvertise = newAdvertise if daemon.netController == nil { return nil } netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) if err != nil { logrus.WithError(err).Warn("failed to get options with network controller") return nil } err = daemon.netController.ReloadConfiguration(netOptions...) 
if err != nil { logrus.Warnf("Failed to reload configuration with network controller: %v", err) } return nil } // reloadLabels updates configuration with engine labels // and updates the passed attributes func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("labels") { daemon.configStore.Labels = conf.Labels } // prepare reload event attributes with updatable configurations if daemon.configStore.Labels != nil { labels, err := json.Marshal(daemon.configStore.Labels) if err != nil { return err } attributes["labels"] = string(labels) } else { attributes["labels"] = "[]" } return nil } // reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options // and updates the passed attributes. func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error { // Update corresponding configuration. if conf.IsValueSet("allow-nondistributable-artifacts") { daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil { return err } } // Prepare reload event attributes with updatable configurations. if daemon.configStore.AllowNondistributableArtifacts != nil { v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts) if err != nil { return err } attributes["allow-nondistributable-artifacts"] = string(v) } else { attributes["allow-nondistributable-artifacts"] = "[]" } return nil } // reloadInsecureRegistries updates configuration with insecure registry option // and updates the passed attributes func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("insecure-registries") { daemon.configStore.InsecureRegistries = conf.InsecureRegistries if err := daemon.RegistryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil { return err } } // prepare reload event attributes with updatable configurations if daemon.configStore.InsecureRegistries != nil { insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) if err != nil { return err } attributes["insecure-registries"] = string(insecureRegistries) } else { attributes["insecure-registries"] = "[]" } return nil } // reloadRegistryMirrors updates configuration with registry mirror options // and updates the passed attributes func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("registry-mirrors") { daemon.configStore.Mirrors = conf.Mirrors if err := daemon.RegistryService.LoadMirrors(conf.Mirrors); err != nil { return err } } // prepare reload event attributes with updatable configurations if daemon.configStore.Mirrors != nil { mirrors, err := json.Marshal(daemon.configStore.Mirrors) if err != nil { return err } attributes["registry-mirrors"] = string(mirrors) } else { attributes["registry-mirrors"] = "[]" } return nil } // reloadLiveRestore updates configuration with live restore option // and updates the passed attributes func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error { // update corresponding configuration if conf.IsValueSet("live-restore") { daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled } // prepare 
reload event attributes with updatable configurations attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) return nil } // reloadNetworkDiagnosticPort updates the network controller starting the diagnostic if the config is valid func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error { if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") || conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 { // If there is no config make sure that the diagnostic is off if daemon.netController != nil { daemon.netController.StopDiagnostic() } return nil } // Enable the network diagnostic if the flag is set with a valid port within the range logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server") daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort) return nil } // reloadFeatures updates configuration with enabled/disabled features func (daemon *Daemon) reloadFeatures(conf *config.Config, attributes map[string]string) { // update corresponding configuration // note that we allow features option to be entirely unset daemon.configStore.Features = conf.Features // prepare reload event attributes with updatable configurations attributes["features"] = fmt.Sprintf("%v", daemon.configStore.Features) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Ah, yeah. I guess I put it here in the first place, so that we'd generate the JSON while we still have the lock. I could make this conditional though (only do this if `err == nil`)
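The after_content in both records combines the two points from this exchange: the marshal stays under the lock but only runs when `err == nil`, and the proxy fields are shadowed with masked copies via an embedded struct. Condensed from the updated Reload shown above (an excerpt, not a standalone program):

```go
// An anonymous struct embeds the live *config.Config plus a ProxyConfig
// whose fields hold masked copies. The embedded ProxyConfig sits at a
// shallower depth than the proxy fields inside Config, so encoding/json
// picks the masked values for the log line.
if err == nil {
	jsonString, _ := json.Marshal(&struct {
		*config.Config
		config.ProxyConfig
	}{
		Config: daemon.configStore,
		ProxyConfig: config.ProxyConfig{
			HTTPProxy:  config.MaskCredentials(daemon.configStore.HTTPProxy),
			HTTPSProxy: config.MaskCredentials(daemon.configStore.HTTPSProxy),
			NoProxy:    config.MaskCredentials(daemon.configStore.NoProxy),
		},
	})
	logrus.Infof("Reloaded configuration: %s", jsonString)
}
```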
thaJeztah
4,462
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
integration/daemon/daemon_test.go
package daemon // import "github.com/docker/docker/integration/daemon" import ( "os" "os/exec" "path/filepath" "runtime" "testing" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } }
package daemon // import "github.com/docker/docker/integration/daemon" import ( "context" "fmt" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" "runtime" "strings" "syscall" "testing" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/env" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } } func TestDaemonProxy(t *testing.T) { skip.If(t, runtime.GOOS == "windows", "cannot start multiple daemons on windows") skip.If(t, os.Getenv("DOCKER_ROOTLESS") != "", "cannot connect to localhost proxy in rootless environment") var received string proxyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { received = r.Host w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte("OK")) })) defer proxyServer.Close() const userPass = "myuser:mypassword@" // Configure proxy through env-vars t.Run("environment variables", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", proxyServer.URL)() defer env.Patch(t, "HTTPS_PROXY", proxyServer.URL)() defer env.Patch(t, "NO_PROXY", "example.com")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() d.Start(t) _, err := c.ImagePull(ctx, "example.org:5000/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through command-line flags t.Run("command-line options", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) d.Start(t, "--http-proxy", proxyServer.URL, "--https-proxy", proxyServer.URL, "--no-proxy", "example.com") logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() _, err = c.ImagePull(ctx, "example.org:5001/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through configuration file t.Run("configuration file", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyServer.URL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) d.Start(t, "--config-file", configFile) logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } _, err = c.ImagePull(ctx, "example.org:5002/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Conflicting options (passed both through command-line options and config file) t.Run("conflicting options", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyRawURL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) err := d.StartWithError("--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com", "--config-file", configFile, "--validate") assert.ErrorContains(t, err, "daemon exited during startup") logs, err := d.ReadLogFile() assert.NilError(t, err) expected := fmt.Sprintf( `the following directives are specified both as a flag and in the configuration file: http-proxy: (from flag: %[1]s, from file: %[1]s), https-proxy: (from flag: %[1]s, from file: %[1]s), no-proxy: (from flag: example.com, from file: example.com)`, proxyURL, ) assert.Assert(t, is.Contains(string(logs), expected)) }) // Make sure values are sanitized when reloading the daemon-config t.Run("reload sanitized", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) d.Start(t, "--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com") defer d.Stop(t) err := d.Signal(syscall.SIGHUP) assert.NilError(t, err) logs, err := d.ReadLogFile() assert.NilError(t, err) // FIXME: there appears to ba a race condition, which causes ReadLogFile // to not contain the full logs after signaling the daemon to reload, // causing the test to fail here. As a workaround, check if we // received the "reloaded" message after signaling, and only then // check that it's sanitized properly. For more details on this // issue, see https://github.com/moby/moby/pull/42835/files#r713120315 if !strings.Contains(string(logs), "Reloaded configuration:") { t.Skip("Skipping test, because we did not find 'Reloaded configuration' in the logs") } assert.Assert(t, is.Contains(string(logs), proxyURL)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) }) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
If someone knows if we can somehow test this in Windows CI, let me know 😅
thaJeztah
4,463
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448

The new format uses a `"proxies"` key that holds the proxies;

```json
{
  "proxies": {
    "http-proxy": "http-config",
    "https-proxy": "https-config",
    "no-proxy": "no-proxy-config"
  }
}
```

--------

carry of https://github.com/moby/moby/pull/42647
fixes https://github.com/moby/moby/issues/24758
closes https://github.com/moby/moby/pull/42647
addresses https://github.com/moby/moby/issues/40201

This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables).

Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use.

The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both a command-line flag and a daemon.json configuration option are set, an error is produced when starting the daemon.

Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration).

With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`)

    cat /etc/docker/daemon.json
    {
      "http-proxy": "http://proxytest.example.com:80",
      "https-proxy": "https://proxytest.example.com:443"
    }

    docker pull busybox
    Using default tag: latest
    Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

    docker build .
    Sending build context to Docker daemon 89.28MB
    Step 1/3 : FROM golang:1.16-alpine AS base
    Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

Integration tests were added to test the behavior:

- verify that the configuration through all means (env-var, command-line flags, daemon.json) is used, and in the expected order of preference.
- verify that conflicting options produce an error.
- verify that logs and error messages sanitise proxy URLs (as they may contain username / password)

```bash
make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration
Running integration-test (iteration 1)
Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy
=== RUN TestDaemonProxy
=== RUN TestDaemonProxy/environment_variables
=== RUN TestDaemonProxy/command-line_options
=== RUN TestDaemonProxy/configuration_file
=== RUN TestDaemonProxy/conflicting_options
=== RUN TestDaemonProxy/reload_sanitized
--- PASS: TestDaemonProxy (6.75s)
    --- PASS: TestDaemonProxy/environment_variables (1.84s)
    --- PASS: TestDaemonProxy/command-line_options (1.84s)
    --- PASS: TestDaemonProxy/configuration_file (1.93s)
    --- PASS: TestDaemonProxy/conflicting_options (0.52s)
    --- PASS: TestDaemonProxy/reload_sanitized (0.63s)
PASS
DONE 6 tests in 6.942s
```

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

```markdown
- Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables.
```

**- A picture of a cute animal (not mandatory but encouraged)**
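The note in the description about `sync.Once()` matches how Go's standard library resolves proxies: `net/http` reads the proxy environment variables only the first time they are needed. The sketch below is not from the PR (the URLs are placeholders) and only illustrates that changing `HTTPS_PROXY` after the first lookup has no effect within the same process, which is why a daemon restart is required:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	os.Setenv("HTTPS_PROXY", "https://first-proxy.invalid:3128")

	req, _ := http.NewRequest(http.MethodGet, "https://registry-1.docker.io/v2/", nil)

	// First call: net/http caches the proxy environment for the process.
	first, _ := http.ProxyFromEnvironment(req)
	fmt.Println("proxy before change:", first)

	// Changing the variable afterwards is not reflected in the cached settings.
	os.Setenv("HTTPS_PROXY", "https://second-proxy.invalid:3128")
	second, _ := http.ProxyFromEnvironment(req)
	fmt.Println("proxy after change: ", second)
}
```

Both prints show the first proxy URL, so a reload (SIGHUP) of the daemon cannot change the effective proxy either.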
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
integration/daemon/daemon_test.go
package daemon // import "github.com/docker/docker/integration/daemon" import ( "os" "os/exec" "path/filepath" "runtime" "testing" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } }
package daemon // import "github.com/docker/docker/integration/daemon" import ( "context" "fmt" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" "runtime" "strings" "syscall" "testing" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/env" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } } func TestDaemonProxy(t *testing.T) { skip.If(t, runtime.GOOS == "windows", "cannot start multiple daemons on windows") skip.If(t, os.Getenv("DOCKER_ROOTLESS") != "", "cannot connect to localhost proxy in rootless environment") var received string proxyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { received = r.Host w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte("OK")) })) defer proxyServer.Close() const userPass = "myuser:mypassword@" // Configure proxy through env-vars t.Run("environment variables", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", proxyServer.URL)() defer env.Patch(t, "HTTPS_PROXY", proxyServer.URL)() defer env.Patch(t, "NO_PROXY", "example.com")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() d.Start(t) _, err := c.ImagePull(ctx, "example.org:5000/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through command-line flags t.Run("command-line options", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) d.Start(t, "--http-proxy", proxyServer.URL, "--https-proxy", proxyServer.URL, "--no-proxy", "example.com") logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() _, err = c.ImagePull(ctx, "example.org:5001/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through configuration file t.Run("configuration file", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyServer.URL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) d.Start(t, "--config-file", configFile) logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } _, err = c.ImagePull(ctx, "example.org:5002/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Conflicting options (passed both through command-line options and config file) t.Run("conflicting options", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyRawURL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) err := d.StartWithError("--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com", "--config-file", configFile, "--validate") assert.ErrorContains(t, err, "daemon exited during startup") logs, err := d.ReadLogFile() assert.NilError(t, err) expected := fmt.Sprintf( `the following directives are specified both as a flag and in the configuration file: http-proxy: (from flag: %[1]s, from file: %[1]s), https-proxy: (from flag: %[1]s, from file: %[1]s), no-proxy: (from flag: example.com, from file: example.com)`, proxyURL, ) assert.Assert(t, is.Contains(string(logs), expected)) }) // Make sure values are sanitized when reloading the daemon-config t.Run("reload sanitized", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) d.Start(t, "--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com") defer d.Stop(t) err := d.Signal(syscall.SIGHUP) assert.NilError(t, err) logs, err := d.ReadLogFile() assert.NilError(t, err) // FIXME: there appears to ba a race condition, which causes ReadLogFile // to not contain the full logs after signaling the daemon to reload, // causing the test to fail here. As a workaround, check if we // received the "reloaded" message after signaling, and only then // check that it's sanitized properly. For more details on this // issue, see https://github.com/moby/moby/pull/42835/files#r713120315 if !strings.Contains(string(logs), "Reloaded configuration:") { t.Skip("Skipping test, because we did not find 'Reloaded configuration' in the logs") } assert.Assert(t, is.Contains(string(logs), proxyURL)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) }) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Hmm.. for some reason this is failing in CI (but it worked locally). May need to start a new daemon after all, because it's also collecting logs for all the other tests, which may be a bit too much to check. Otherwise, perhaps I should change it to a unit-test.

edit: now creating separate daemons for each subtest, and this looks to work;

```
--- PASS: TestDaemonProxy (6.30s)
    --- PASS: TestDaemonProxy/environment_variables (1.84s)
    --- PASS: TestDaemonProxy/command-line_options (1.72s)
    --- PASS: TestDaemonProxy/configuration_file (1.72s)
    --- PASS: TestDaemonProxy/conflicting_options (0.50s)
    --- PASS: TestDaemonProxy/reload_sanitized (0.52s)
```

```
time="2021-09-13T08:07:51.196830830Z" level=info msg="Reloaded configuration: {\"storage-driver\":\"overlay2\",\"mtu\":1500,\"pidfile\":\"/go/src/github.com/docker/docker/bundles/test-integration/TestDaemonProxy/reload_sanitized/d09b41937dc94/docker.pid\",\"data-root\":\"/go/src/github.com/docker/docker/bundles/test-integration/TestDaemonProxy/reload_sanitized/d09b41937dc94/root\",\"exec-root\":\"/tmp/dxr/d09b41937dc94\",\"group\":\"docker\",\"deprecated-key-path\":\"/etc/docker/key.json\",\"max-concurrent-downloads\":3,\"max-concurrent-uploads\":5,\"max-download-attempts\":5,\"shutdown-timeout\":15,\"debug\":true,\"hosts\":[\"unix:///tmp/docker-integration/d09b41937dc94.sock\"],\"log-level\":\"info\",\"swarm-default-advertise-addr\":\"\",\"swarm-raft-heartbeat-tick\":0,\"swarm-raft-election-tick\":0,\"metrics-addr\":\"\",\"host-gateway-ip\":\"172.18.0.1\",\"log-driver\":\"json-file\",\"ip\":\"0.0.0.0\",\"icc\":true,\"iptables\":true,\"ip-forward\":true,\"ip-masq\":true,\"userland-proxy\":true,\"default-address-pools\":{\"Values\":null},\"network-control-plane-mtu\":1500,\"experimental\":false,\"containerd\":\"/var/run/docker/containerd/containerd.sock\",\"builder\":{\"GC\":{},\"Entitlements\":{}},\"containerd-namespace\":\"d09b41937dc94\",\"containerd-plugin-namespace\":\"d09b41937dc94p\",\"runtimes\":{\"io.containerd.runc.v2\":{\"path\":\"runc\"},\"io.containerd.runtime.v1.linux\":{\"path\":\"runc\"},\"runc\":{\"path\":\"runc\"}},\"default-runtime\":\"runc\",\"seccomp-profile\":\"builtin\",\"default-shm-size\":67108864,\"default-ipc-mode\":\"private\",\"default-cgroupns-mode\":\"private\",\"resolv-conf\":\"/etc/resolv.conf\",\"http-proxy\":\"https://xxxxx:[email protected]\",\"https-proxy\":\"https://xxxxx:[email protected]\",\"no-proxy\":\"example.com\"}"
```
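The fix described in this comment — one daemon per subtest, so the log file only contains that subtest's output — follows the pattern sketched below. This is only an illustration built on the `testutil/daemon` helpers already used in this file; the subtest names and assertions are placeholders, not the PR's actual test:

```go
package daemon_test

import (
	"testing"

	"github.com/docker/docker/testutil/daemon"
	"gotest.tools/v3/assert"
)

func TestProxyPerSubtest(t *testing.T) {
	for _, name := range []string{"environment variables", "command-line options"} {
		t.Run(name, func(t *testing.T) {
			d := daemon.New(t) // a fresh daemon (and log file) for every subtest
			d.Start(t)
			defer d.Stop(t)

			// ReadLogFile now only returns logs produced by this subtest's daemon.
			logs, err := d.ReadLogFile()
			assert.NilError(t, err)
			assert.Assert(t, len(logs) > 0)
		})
	}
}
```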
thaJeztah
4,464
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448

The new format uses a `"proxies"` key that holds the proxies;

```json
{
  "proxies": {
    "http-proxy": "http-config",
    "https-proxy": "https-config",
    "no-proxy": "no-proxy-config"
  }
}
```

--------

carry of https://github.com/moby/moby/pull/42647
fixes https://github.com/moby/moby/issues/24758
closes https://github.com/moby/moby/pull/42647
addresses https://github.com/moby/moby/issues/40201

This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables).

Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use.

The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both a command-line flag and a daemon.json configuration option are set, an error is produced when starting the daemon.

Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration).

With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`)

    cat /etc/docker/daemon.json
    {
      "http-proxy": "http://proxytest.example.com:80",
      "https-proxy": "https://proxytest.example.com:443"
    }

    docker pull busybox
    Using default tag: latest
    Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

    docker build .
    Sending build context to Docker daemon 89.28MB
    Step 1/3 : FROM golang:1.16-alpine AS base
    Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

Integration tests were added to test the behavior:

- verify that the configuration through all means (env-var, command-line flags, daemon.json) is used, and in the expected order of preference.
- verify that conflicting options produce an error.
- verify that logs and error messages sanitise proxy URLs (as they may contain username / password)

```bash
make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration
Running integration-test (iteration 1)
Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy
=== RUN TestDaemonProxy
=== RUN TestDaemonProxy/environment_variables
=== RUN TestDaemonProxy/command-line_options
=== RUN TestDaemonProxy/configuration_file
=== RUN TestDaemonProxy/conflicting_options
=== RUN TestDaemonProxy/reload_sanitized
--- PASS: TestDaemonProxy (6.75s)
    --- PASS: TestDaemonProxy/environment_variables (1.84s)
    --- PASS: TestDaemonProxy/command-line_options (1.84s)
    --- PASS: TestDaemonProxy/configuration_file (1.93s)
    --- PASS: TestDaemonProxy/conflicting_options (0.52s)
    --- PASS: TestDaemonProxy/reload_sanitized (0.63s)
PASS
DONE 6 tests in 6.942s
```

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

```markdown
- Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables.
```

**- A picture of a cute animal (not mandatory but encouraged)**
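The description above states that setting the same proxy option both as a flag and in `daemon.json` aborts daemon startup. The stand-alone sketch below is not the daemon's actual implementation; it only illustrates the rule that the "conflicting options" subtest asserts, using a hypothetical `conflict` helper and an error message modeled on the one seen in the test logs:

```go
package main

import "fmt"

// conflict reports options that are set both on the command line and in the
// configuration file; in the daemon, such a conflict prevents startup.
func conflict(flags, file map[string]string) error {
	var dup []string
	for key, flagVal := range flags {
		if fileVal, ok := file[key]; ok {
			dup = append(dup, fmt.Sprintf("%s: (from flag: %s, from file: %s)", key, flagVal, fileVal))
		}
	}
	if len(dup) > 0 {
		return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %v", dup)
	}
	return nil
}

func main() {
	err := conflict(
		map[string]string{"http-proxy": "https://proxy.example.org"},
		map[string]string{"http-proxy": "https://proxy.example.org"},
	)
	fmt.Println(err)
}
```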
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
integration/daemon/daemon_test.go
package daemon // import "github.com/docker/docker/integration/daemon" import ( "os" "os/exec" "path/filepath" "runtime" "testing" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } }
package daemon // import "github.com/docker/docker/integration/daemon" import ( "context" "fmt" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" "runtime" "strings" "syscall" "testing" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/env" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } } func TestDaemonProxy(t *testing.T) { skip.If(t, runtime.GOOS == "windows", "cannot start multiple daemons on windows") skip.If(t, os.Getenv("DOCKER_ROOTLESS") != "", "cannot connect to localhost proxy in rootless environment") var received string proxyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { received = r.Host w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte("OK")) })) defer proxyServer.Close() const userPass = "myuser:mypassword@" // Configure proxy through env-vars t.Run("environment variables", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", proxyServer.URL)() defer env.Patch(t, "HTTPS_PROXY", proxyServer.URL)() defer env.Patch(t, "NO_PROXY", "example.com")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() d.Start(t) _, err := c.ImagePull(ctx, "example.org:5000/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through command-line flags t.Run("command-line options", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) d.Start(t, "--http-proxy", proxyServer.URL, "--https-proxy", proxyServer.URL, "--no-proxy", "example.com") logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() _, err = c.ImagePull(ctx, "example.org:5001/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through configuration file t.Run("configuration file", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyServer.URL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) d.Start(t, "--config-file", configFile) logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } _, err = c.ImagePull(ctx, "example.org:5002/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Conflicting options (passed both through command-line options and config file) t.Run("conflicting options", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyRawURL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) err := d.StartWithError("--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com", "--config-file", configFile, "--validate") assert.ErrorContains(t, err, "daemon exited during startup") logs, err := d.ReadLogFile() assert.NilError(t, err) expected := fmt.Sprintf( `the following directives are specified both as a flag and in the configuration file: http-proxy: (from flag: %[1]s, from file: %[1]s), https-proxy: (from flag: %[1]s, from file: %[1]s), no-proxy: (from flag: example.com, from file: example.com)`, proxyURL, ) assert.Assert(t, is.Contains(string(logs), expected)) }) // Make sure values are sanitized when reloading the daemon-config t.Run("reload sanitized", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) d.Start(t, "--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com") defer d.Stop(t) err := d.Signal(syscall.SIGHUP) assert.NilError(t, err) logs, err := d.ReadLogFile() assert.NilError(t, err) // FIXME: there appears to ba a race condition, which causes ReadLogFile // to not contain the full logs after signaling the daemon to reload, // causing the test to fail here. As a workaround, check if we // received the "reloaded" message after signaling, and only then // check that it's sanitized properly. For more details on this // issue, see https://github.com/moby/moby/pull/42835/files#r713120315 if !strings.Contains(string(logs), "Reloaded configuration:") { t.Skip("Skipping test, because we did not find 'Reloaded configuration' in the logs") } assert.Assert(t, is.Contains(string(logs), proxyURL)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) }) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Looks like we may have to skip this on rootless as well; it's not connecting to the "proxy". Not exactly sure why it would not be supported by rootless though, but perhaps something in how networking is set up there? 🤔

```
=== RUN TestDaemonProxy/environment_variables
    daemon_test.go:183: assertion failed: (received string) != example.org:5000 (string)
--- FAIL: TestDaemonProxy/environment_variables (0.53s)
```

```
time="2021-09-09T12:24:03.244052911Z" level=debug msg="Calling POST /v1.42/images/create?fromImage=example.org%3A5000%2Fsome%2Fimage&tag=latest"
time="2021-09-09T12:24:03.251949489Z" level=debug msg="hostDir: /home/unprivilegeduser/.config/docker/certs.d/example.org:5000"
time="2021-09-09T12:24:03.251995467Z" level=debug msg="Trying to pull example.org:5000/some/image from https://example.org:5000 v2"
time="2021-09-09T12:24:03.252172111Z" level=warning msg="Error getting v2 registry: Get \"https://example.org:5000/v2/\": proxyconnect tcp: dial tcp 127.0.0.1:45999: connect: connection refused"
time="2021-09-09T12:24:03.252193892Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://example.org:5000/v2/\": proxyconnect tcp: dial tcp 127.0.0.1:45999: connect: connection refused"
time="2021-09-09T12:24:03.255022400Z" level=error msg="Handler for POST /v1.42/images/create returned error: Get \"https://example.org:5000/v2/\": proxyconnect tcp: dial tcp 127.0.0.1:45999: connect: connection refused"
```
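From the "connection refused" lines above, a plausible explanation (an assumption on my part, not confirmed in this thread) is that the rootless daemon runs in its own network namespace, so a test proxy bound to the host's 127.0.0.1 is unreachable from it. The final test in this PR guards against that with a skip; the sketch below shows roughly what such guards look like, with the test body omitted:

```go
package daemon_test

import (
	"os"
	"runtime"
	"testing"

	"gotest.tools/v3/skip"
)

func TestDaemonProxyGuards(t *testing.T) {
	skip.If(t, runtime.GOOS == "windows", "cannot start multiple daemons on windows")
	// The proxy started by the test listens on the host's loopback interface,
	// which the rootless daemon (in its own network namespace) cannot reach.
	skip.If(t, os.Getenv("DOCKER_ROOTLESS") != "", "cannot connect to localhost proxy in rootless environment")

	// ... proxy assertions would follow here ...
}
```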
thaJeztah
4,465
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448

The new format uses a `"proxies"` key that holds the proxies;

```json
{
  "proxies": {
    "http-proxy": "http-config",
    "https-proxy": "https-config",
    "no-proxy": "no-proxy-config"
  }
}
```

--------

carry of https://github.com/moby/moby/pull/42647
fixes https://github.com/moby/moby/issues/24758
closes https://github.com/moby/moby/pull/42647
addresses https://github.com/moby/moby/issues/40201

This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables).

Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use.

The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both a command-line flag and a daemon.json configuration option are set, an error is produced when starting the daemon.

Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration).

With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`)

    cat /etc/docker/daemon.json
    {
      "http-proxy": "http://proxytest.example.com:80",
      "https-proxy": "https://proxytest.example.com:443"
    }

    docker pull busybox
    Using default tag: latest
    Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

    docker build .
    Sending build context to Docker daemon 89.28MB
    Step 1/3 : FROM golang:1.16-alpine AS base
    Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

Integration tests were added to test the behavior:

- verify that the configuration through all means (env-var, command-line flags, daemon.json) is used, and in the expected order of preference.
- verify that conflicting options produce an error.
- verify that logs and error messages sanitise proxy URLs (as they may contain username / password)

```bash
make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
integration/daemon/daemon_test.go
package daemon // import "github.com/docker/docker/integration/daemon" import ( "os" "os/exec" "path/filepath" "runtime" "testing" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } }
package daemon // import "github.com/docker/docker/integration/daemon" import ( "context" "fmt" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" "runtime" "strings" "syscall" "testing" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/env" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } } func TestDaemonProxy(t *testing.T) { skip.If(t, runtime.GOOS == "windows", "cannot start multiple daemons on windows") skip.If(t, os.Getenv("DOCKER_ROOTLESS") != "", "cannot connect to localhost proxy in rootless environment") var received string proxyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { received = r.Host w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte("OK")) })) defer proxyServer.Close() const userPass = "myuser:mypassword@" // Configure proxy through env-vars t.Run("environment variables", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", proxyServer.URL)() defer env.Patch(t, "HTTPS_PROXY", proxyServer.URL)() defer env.Patch(t, "NO_PROXY", "example.com")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() d.Start(t) _, err := c.ImagePull(ctx, "example.org:5000/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through command-line flags t.Run("command-line options", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) d.Start(t, "--http-proxy", proxyServer.URL, "--https-proxy", proxyServer.URL, "--no-proxy", "example.com") logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() _, err = c.ImagePull(ctx, "example.org:5001/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through configuration file t.Run("configuration file", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyServer.URL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) d.Start(t, "--config-file", configFile) logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } _, err = c.ImagePull(ctx, "example.org:5002/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Conflicting options (passed both through command-line options and config file) t.Run("conflicting options", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyRawURL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) err := d.StartWithError("--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com", "--config-file", configFile, "--validate") assert.ErrorContains(t, err, "daemon exited during startup") logs, err := d.ReadLogFile() assert.NilError(t, err) expected := fmt.Sprintf( `the following directives are specified both as a flag and in the configuration file: http-proxy: (from flag: %[1]s, from file: %[1]s), https-proxy: (from flag: %[1]s, from file: %[1]s), no-proxy: (from flag: example.com, from file: example.com)`, proxyURL, ) assert.Assert(t, is.Contains(string(logs), expected)) }) // Make sure values are sanitized when reloading the daemon-config t.Run("reload sanitized", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) d.Start(t, "--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com") defer d.Stop(t) err := d.Signal(syscall.SIGHUP) assert.NilError(t, err) logs, err := d.ReadLogFile() assert.NilError(t, err) // FIXME: there appears to ba a race condition, which causes ReadLogFile // to not contain the full logs after signaling the daemon to reload, // causing the test to fail here. As a workaround, check if we // received the "reloaded" message after signaling, and only then // check that it's sanitized properly. For more details on this // issue, see https://github.com/moby/moby/pull/42835/files#r713120315 if !strings.Contains(string(logs), "Reloaded configuration:") { t.Skip("Skipping test, because we did not find 'Reloaded configuration' in the logs") } assert.Assert(t, is.Contains(string(logs), proxyURL)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) }) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Running out of ideas on where the race condition is and how to work around it. From CI, it looks like it's passing 23 out of 25 times (roughly 92%), so this workaround only verifies the output if we actually read the "Reloaded configuration" message from the logs, and otherwise skips the check. If anyone has ideas on how to fix the root cause of the race condition, let me know, and then we can remove this hack.
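For reference, the workaround as it ended up in the merged test, condensed from the after_content below (the identifiers `d`, `proxyURL`, and `userPass` come from the surrounding test): read the daemon log after sending SIGHUP, and only assert on the sanitized URL when the "Reloaded configuration:" line was actually captured, skipping otherwise.

```go
err := d.Signal(syscall.SIGHUP)
assert.NilError(t, err)

logs, err := d.ReadLogFile()
assert.NilError(t, err)

// Workaround for the ReadLogFile race: only check sanitization when the
// reload message made it into the captured logs; otherwise skip instead
// of failing.
if !strings.Contains(string(logs), "Reloaded configuration:") {
	t.Skip("Skipping test, because we did not find 'Reloaded configuration' in the logs")
}
assert.Assert(t, is.Contains(string(logs), proxyURL))
assert.Assert(t, !strings.Contains(string(logs), userPass),
	"logs should not contain the non-sanitized proxy URL: %s", string(logs))
```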
thaJeztah
4,466
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json con- figuration file or command-line flags configuration file, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon con- figuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option is set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration. With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means are used (env-var, command-line flags, damon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=. 
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
integration/daemon/daemon_test.go
package daemon // import "github.com/docker/docker/integration/daemon" import ( "os" "os/exec" "path/filepath" "runtime" "testing" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } }
package daemon // import "github.com/docker/docker/integration/daemon" import ( "context" "fmt" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" "runtime" "strings" "syscall" "testing" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/env" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } } func TestDaemonProxy(t *testing.T) { skip.If(t, runtime.GOOS == "windows", "cannot start multiple daemons on windows") skip.If(t, os.Getenv("DOCKER_ROOTLESS") != "", "cannot connect to localhost proxy in rootless environment") var received string proxyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { received = r.Host w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte("OK")) })) defer proxyServer.Close() const userPass = "myuser:mypassword@" // Configure proxy through env-vars t.Run("environment variables", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", proxyServer.URL)() defer env.Patch(t, "HTTPS_PROXY", proxyServer.URL)() defer env.Patch(t, "NO_PROXY", "example.com")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() d.Start(t) _, err := c.ImagePull(ctx, "example.org:5000/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through command-line flags t.Run("command-line options", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) d.Start(t, "--http-proxy", proxyServer.URL, "--https-proxy", proxyServer.URL, "--no-proxy", "example.com") logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() _, err = c.ImagePull(ctx, "example.org:5001/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through configuration file t.Run("configuration file", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyServer.URL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) d.Start(t, "--config-file", configFile) logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } _, err = c.ImagePull(ctx, "example.org:5002/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Conflicting options (passed both through command-line options and config file) t.Run("conflicting options", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyRawURL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) err := d.StartWithError("--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com", "--config-file", configFile, "--validate") assert.ErrorContains(t, err, "daemon exited during startup") logs, err := d.ReadLogFile() assert.NilError(t, err) expected := fmt.Sprintf( `the following directives are specified both as a flag and in the configuration file: http-proxy: (from flag: %[1]s, from file: %[1]s), https-proxy: (from flag: %[1]s, from file: %[1]s), no-proxy: (from flag: example.com, from file: example.com)`, proxyURL, ) assert.Assert(t, is.Contains(string(logs), expected)) }) // Make sure values are sanitized when reloading the daemon-config t.Run("reload sanitized", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) d.Start(t, "--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com") defer d.Stop(t) err := d.Signal(syscall.SIGHUP) assert.NilError(t, err) logs, err := d.ReadLogFile() assert.NilError(t, err) // FIXME: there appears to ba a race condition, which causes ReadLogFile // to not contain the full logs after signaling the daemon to reload, // causing the test to fail here. As a workaround, check if we // received the "reloaded" message after signaling, and only then // check that it's sanitized properly. For more details on this // issue, see https://github.com/moby/moby/pull/42835/files#r713120315 if !strings.Contains(string(logs), "Reloaded configuration:") { t.Skip("Skipping test, because we did not find 'Reloaded configuration' in the logs") } assert.Assert(t, is.Contains(string(logs), proxyURL)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) }) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Should we put in a `t.Skip` or something? Too bad there's nothing that's the opposite of `-short`, like an `-extended` flag, for tests like this. :sweat_smile:
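There's no built-in inverse of `-short`, but a custom package-level test flag can approximate the `-extended` idea; a hypothetical sketch (the `extended` flag name and `TestReloadSanitized` function are made up for illustration, not part of this PR):

```go
package daemon_test

import (
	"flag"
	"testing"
)

// extended is registered at package level, before the testing package parses
// flags, so it can be passed directly: `go test -run TestReloadSanitized -extended`.
var extended = flag.Bool("extended", false, "also run checks that are known to be racy")

func TestReloadSanitized(t *testing.T) {
	if !*extended {
		t.Skip("skipping racy log check; re-run with -extended to include it")
	}
	// ... racy assertions would go here ...
}
```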
tianon
4,467
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json con- figuration file or command-line flags configuration file, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon con- figuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option is set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration. With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means are used (env-var, command-line flags, damon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=. 
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
integration/daemon/daemon_test.go
package daemon // import "github.com/docker/docker/integration/daemon" import ( "os" "os/exec" "path/filepath" "runtime" "testing" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS != "linux") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } }
package daemon // import "github.com/docker/docker/integration/daemon" import ( "context" "fmt" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" "runtime" "strings" "syscall" "testing" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/config" "github.com/docker/docker/testutil/daemon" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/env" "gotest.tools/v3/skip" ) func TestConfigDaemonLibtrustID(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) trustKey := filepath.Join(d.RootDir(), "key.json") err := os.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644) assert.NilError(t, err) config := filepath.Join(d.RootDir(), "daemon.json") err = os.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", config) info := d.Info(t) assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB") } func TestDaemonConfigValidation(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) dockerBinary, err := d.BinaryPath() assert.NilError(t, err) params := []string{"--validate", "--config-file"} dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata") const ( validOut = "configuration OK" failedOut = "unable to configure the Docker daemon with file" ) tests := []struct { name string args []string expectedOut string }{ { name: "config with no content", args: append(params, filepath.Join(testdata, "empty-config-1.json")), expectedOut: validOut, }, { name: "config with {}", args: append(params, filepath.Join(testdata, "empty-config-2.json")), expectedOut: validOut, }, { name: "invalid config", args: append(params, filepath.Join(testdata, "invalid-config-1.json")), expectedOut: failedOut, }, { name: "malformed config", args: append(params, filepath.Join(testdata, "malformed-config.json")), expectedOut: failedOut, }, { name: "valid config", args: append(params, filepath.Join(testdata, "valid-config-1.json")), expectedOut: validOut, }, } for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cmd := exec.Command(dockerBinary, tc.args...) 
out, err := cmd.CombinedOutput() assert.Check(t, is.Contains(string(out), tc.expectedOut)) if tc.expectedOut == failedOut { assert.ErrorContains(t, err, "", "expected an error, but got none") } else { assert.NilError(t, err) } }) } } func TestConfigDaemonSeccompProfiles(t *testing.T) { skip.If(t, runtime.GOOS == "windows") d := daemon.New(t) defer d.Stop(t) tests := []struct { doc string profile string expectedProfile string }{ { doc: "empty profile set", profile: "", expectedProfile: config.SeccompProfileDefault, }, { doc: "default profile", profile: config.SeccompProfileDefault, expectedProfile: config.SeccompProfileDefault, }, { doc: "unconfined profile", profile: config.SeccompProfileUnconfined, expectedProfile: config.SeccompProfileUnconfined, }, } for _, tc := range tests { tc := tc t.Run(tc.doc, func(t *testing.T) { d.Start(t, "--seccomp-profile="+tc.profile) info := d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) cfg := filepath.Join(d.RootDir(), "daemon.json") err := os.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644) assert.NilError(t, err) d.Start(t, "--config-file", cfg) info = d.Info(t) assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile)) d.Stop(t) }) } } func TestDaemonProxy(t *testing.T) { skip.If(t, runtime.GOOS == "windows", "cannot start multiple daemons on windows") skip.If(t, os.Getenv("DOCKER_ROOTLESS") != "", "cannot connect to localhost proxy in rootless environment") var received string proxyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { received = r.Host w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte("OK")) })) defer proxyServer.Close() const userPass = "myuser:mypassword@" // Configure proxy through env-vars t.Run("environment variables", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", proxyServer.URL)() defer env.Patch(t, "HTTPS_PROXY", proxyServer.URL)() defer env.Patch(t, "NO_PROXY", "example.com")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() d.Start(t) _, err := c.ImagePull(ctx, "example.org:5000/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5000", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through command-line flags t.Run("command-line options", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) d.Start(t, "--http-proxy", proxyServer.URL, "--https-proxy", proxyServer.URL, "--no-proxy", "example.com") logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() _, err = c.ImagePull(ctx, "example.org:5001/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5001", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Configure proxy through configuration file t.Run("configuration file", func(t *testing.T) { defer env.Patch(t, "HTTP_PROXY", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "http_proxy", "http://"+userPass+"from-env-http.invalid")() defer env.Patch(t, "HTTPS_PROXY", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "https_proxy", "https://"+userPass+"myuser:[email protected]")() defer env.Patch(t, "NO_PROXY", "ignore.invalid")() defer env.Patch(t, "no_proxy", "ignore.invalid")() d := daemon.New(t) c := d.NewClientT(t) defer func() { _ = c.Close() }() ctx := context.Background() configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyServer.URL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) d.Start(t, "--config-file", configFile) logs, err := d.ReadLogFile() assert.NilError(t, err) assert.Assert(t, is.Contains(string(logs), "overriding existing proxy variable with value from configuration")) for _, v := range []string{"http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY"} { assert.Assert(t, is.Contains(string(logs), "name="+v)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) } _, err = c.ImagePull(ctx, "example.org:5002/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002") // Test NoProxy: example.com should not hit the proxy, and "received" variable should not be changed. 
_, err = c.ImagePull(ctx, "example.com/some/image:latest", types.ImagePullOptions{}) assert.ErrorContains(t, err, "", "pulling should have failed") assert.Equal(t, received, "example.org:5002", "should not have used proxy") info := d.Info(t) assert.Equal(t, info.HTTPProxy, proxyServer.URL) assert.Equal(t, info.HTTPSProxy, proxyServer.URL) assert.Equal(t, info.NoProxy, "example.com") d.Stop(t) }) // Conflicting options (passed both through command-line options and config file) t.Run("conflicting options", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) configFile := filepath.Join(d.RootDir(), "daemon.json") configJSON := fmt.Sprintf(`{"http-proxy":%[1]q, "https-proxy": %[1]q, "no-proxy": "example.com"}`, proxyRawURL) assert.NilError(t, os.WriteFile(configFile, []byte(configJSON), 0644)) err := d.StartWithError("--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com", "--config-file", configFile, "--validate") assert.ErrorContains(t, err, "daemon exited during startup") logs, err := d.ReadLogFile() assert.NilError(t, err) expected := fmt.Sprintf( `the following directives are specified both as a flag and in the configuration file: http-proxy: (from flag: %[1]s, from file: %[1]s), https-proxy: (from flag: %[1]s, from file: %[1]s), no-proxy: (from flag: example.com, from file: example.com)`, proxyURL, ) assert.Assert(t, is.Contains(string(logs), expected)) }) // Make sure values are sanitized when reloading the daemon-config t.Run("reload sanitized", func(t *testing.T) { const ( proxyRawURL = "https://" + userPass + "example.org" proxyURL = "https://xxxxx:[email protected]" ) d := daemon.New(t) d.Start(t, "--http-proxy", proxyRawURL, "--https-proxy", proxyRawURL, "--no-proxy", "example.com") defer d.Stop(t) err := d.Signal(syscall.SIGHUP) assert.NilError(t, err) logs, err := d.ReadLogFile() assert.NilError(t, err) // FIXME: there appears to ba a race condition, which causes ReadLogFile // to not contain the full logs after signaling the daemon to reload, // causing the test to fail here. As a workaround, check if we // received the "reloaded" message after signaling, and only then // check that it's sanitized properly. For more details on this // issue, see https://github.com/moby/moby/pull/42835/files#r713120315 if !strings.Contains(string(logs), "Reloaded configuration:") { t.Skip("Skipping test, because we did not find 'Reloaded configuration' in the logs") } assert.Assert(t, is.Contains(string(logs), proxyURL)) assert.Assert(t, !strings.Contains(string(logs), userPass), "logs should not contain the non-sanitized proxy URL: %s", string(logs)) }) }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Ah, yes, a `t.Skip()` could make sense; it didn't come to mind, because we would normally do that only at the _start_ of a test, but I guess it's OK to do it elsewhere. I'll update
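For reference, a minimal self-contained sketch (not taken from the PR) of calling `t.Skip()` partway through a test, which is what the workaround above does; the log text and skip message here are made up:

```go
package example

import (
	"strings"
	"testing"
)

// Minimal illustration that t.Skip can be called anywhere in a test body:
// everything before the skip has already run, and the test is then reported
// as skipped rather than failed.
func TestSkipMidway(t *testing.T) {
	logs := "daemon started\n" // stand-in for output read from a log file

	if !strings.Contains(logs, "Reloaded configuration:") {
		t.Skip("precondition not observed; skipping the remaining assertions")
	}

	// Assertions that depend on the reload event would follow here.
}
```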
thaJeztah
4,468
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
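The description above notes that the proxy settings are not live-reloadable because Go caches them behind `sync.Once()`. A minimal sketch of that behaviour (illustrative variable names only; not the daemon's or the standard library's actual code):

```go
package main

import (
	"fmt"
	"sync"
)

// Minimal illustration of why configuration guarded by sync.Once cannot be
// live-reloaded: the function passed to Do runs only once per Once value,
// so later changes to the source variable are never observed by callers.
func main() {
	proxy := "http://old-proxy.example:80"

	var once sync.Once
	var cached string
	lookup := func() string {
		once.Do(func() { cached = proxy })
		return cached
	}

	fmt.Println(lookup()) // http://old-proxy.example:80

	proxy = "http://new-proxy.example:80" // simulated "reload"
	fmt.Println(lookup())                 // still http://old-proxy.example:80
}
```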
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
testutil/daemon/daemon.go
package daemon // import "github.com/docker/docker/testutil/daemon" import ( "context" "encoding/json" "fmt" "net/http" "os" "os/exec" "os/user" "path/filepath" "strconv" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/client" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/testutil/request" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" "gotest.tools/v3/assert" ) // LogT is the subset of the testing.TB interface used by the daemon. type LogT interface { Logf(string, ...interface{}) } // nopLog is a no-op implementation of LogT that is used in daemons created by // NewDaemon (where no testing.TB is available). type nopLog struct{} func (nopLog) Logf(string, ...interface{}) {} const ( defaultDockerdBinary = "dockerd" defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock" defaultDockerdRootlessBinary = "dockerd-rootless.sh" defaultUnixSocket = "/var/run/docker.sock" defaultTLSHost = "localhost:2376" ) var errDaemonNotStarted = errors.New("daemon not started") // SockRoot holds the path of the default docker integration daemon socket var SockRoot = filepath.Join(os.TempDir(), "docker-integration") type clientConfig struct { transport *http.Transport scheme string addr string } // Daemon represents a Docker daemon for the testing framework type Daemon struct { Root string Folder string Wait chan error UseDefaultHost bool UseDefaultTLSHost bool id string logFile *os.File cmd *exec.Cmd storageDriver string userlandProxy bool defaultCgroupNamespaceMode string execRoot string experimental bool init bool dockerdBinary string log LogT pidFile string args []string containerdSocket string rootlessUser *user.User rootlessXDGRuntimeDir string // swarm related field swarmListenAddr string SwarmPort int // FIXME(vdemeester) should probably not be exported DefaultAddrPool []string SubnetSize uint32 DataPathPort uint32 OOMScoreAdjust int // cached information CachedInfo types.Info } // NewDaemon returns a Daemon instance to be used for testing. // The daemon will not automatically start. // The daemon will modify and create files under workingDir. 
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) { storageDriver := os.Getenv("DOCKER_GRAPHDRIVER") if err := os.MkdirAll(SockRoot, 0700); err != nil { return nil, errors.Wrapf(err, "failed to create daemon socket root %q", SockRoot) } id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) dir := filepath.Join(workingDir, id) daemonFolder, err := filepath.Abs(dir) if err != nil { return nil, err } daemonRoot := filepath.Join(daemonFolder, "root") if err := os.MkdirAll(daemonRoot, 0755); err != nil { return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot) } userlandProxy := true if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err != nil { userlandProxy = val } } d := &Daemon{ id: id, Folder: daemonFolder, Root: daemonRoot, storageDriver: storageDriver, userlandProxy: userlandProxy, // dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation) execRoot: filepath.Join(os.TempDir(), "dxr", id), dockerdBinary: defaultDockerdBinary, swarmListenAddr: defaultSwarmListenAddr, SwarmPort: DefaultSwarmPort, log: nopLog{}, containerdSocket: defaultContainerdSocket, } for _, op := range ops { op(d) } if d.rootlessUser != nil { if err := os.Chmod(SockRoot, 0777); err != nil { return nil, err } uid, err := strconv.Atoi(d.rootlessUser.Uid) if err != nil { return nil, err } gid, err := strconv.Atoi(d.rootlessUser.Gid) if err != nil { return nil, err } if err := os.Chown(d.Folder, uid, gid); err != nil { return nil, err } if err := os.Chown(d.Root, uid, gid); err != nil { return nil, err } if err := os.MkdirAll(filepath.Dir(d.execRoot), 0700); err != nil { return nil, err } if err := os.Chown(filepath.Dir(d.execRoot), uid, gid); err != nil { return nil, err } if err := os.MkdirAll(d.execRoot, 0700); err != nil { return nil, err } if err := os.Chown(d.execRoot, uid, gid); err != nil { return nil, err } d.rootlessXDGRuntimeDir = filepath.Join(d.Folder, "xdgrun") if err := os.MkdirAll(d.rootlessXDGRuntimeDir, 0700); err != nil { return nil, err } if err := os.Chown(d.rootlessXDGRuntimeDir, uid, gid); err != nil { return nil, err } d.containerdSocket = "" } return d, nil } // New returns a Daemon instance to be used for testing. // This will create a directory such as d123456789 in the folder specified by // $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. // The daemon will not automatically start. func New(t testing.TB, ops ...Option) *Daemon { t.Helper() dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } dest = filepath.Join(dest, t.Name()) assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable") if os.Getenv("DOCKER_ROOTLESS") != "" { if os.Getenv("DOCKER_REMAP_ROOT") != "" { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_REMAP_ROOT currently") } if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err == nil && !val { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_USERLANDPROXY=false") } } ops = append(ops, WithRootlessUser("unprivilegeduser")) } ops = append(ops, WithOOMScoreAdjust(-500)) d, err := NewDaemon(dest, ops...) assert.NilError(t, err, "could not create daemon at %q", dest) if d.rootlessUser != nil && d.dockerdBinary != defaultDockerdBinary { t.Skipf("DOCKER_ROOTLESS doesn't support specifying non-default dockerd binary path %q", d.dockerdBinary) } return d } // BinaryPath returns the binary and its arguments. 
func (d *Daemon) BinaryPath() (string, error) { dockerdBinary, err := exec.LookPath(d.dockerdBinary) if err != nil { return "", errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) } return dockerdBinary, nil } // ContainersNamespace returns the containerd namespace used for containers. func (d *Daemon) ContainersNamespace() string { return d.id } // RootDir returns the root directory of the daemon. func (d *Daemon) RootDir() string { return d.Root } // ID returns the generated id of the daemon func (d *Daemon) ID() string { return d.id } // StorageDriver returns the configured storage driver of the daemon func (d *Daemon) StorageDriver() string { return d.storageDriver } // Sock returns the socket path of the daemon func (d *Daemon) Sock() string { return fmt.Sprintf("unix://" + d.sockPath()) } func (d *Daemon) sockPath() string { return filepath.Join(SockRoot, d.id+".sock") } // LogFileName returns the path the daemon's log file func (d *Daemon) LogFileName() string { return d.logFile.Name() } // ReadLogFile returns the content of the daemon log file func (d *Daemon) ReadLogFile() ([]byte, error) { return os.ReadFile(d.logFile.Name()) } // NewClientT creates new client based on daemon's socket path func (d *Daemon) NewClientT(t testing.TB, extraOpts ...client.Opt) *client.Client { t.Helper() c, err := d.NewClient(extraOpts...) assert.NilError(t, err, "[%s] could not create daemon client", d.id) return c } // NewClient creates new client based on daemon's socket path func (d *Daemon) NewClient(extraOpts ...client.Opt) (*client.Client, error) { clientOpts := []client.Opt{ client.FromEnv, client.WithHost(d.Sock()), } clientOpts = append(clientOpts, extraOpts...) return client.NewClientWithOpts(clientOpts...) } // Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files func (d *Daemon) Cleanup(t testing.TB) { t.Helper() cleanupMount(t, d) cleanupRaftDir(t, d) cleanupNetworkNamespace(t, d) } // Start starts the daemon and return once it is ready to receive requests. func (d *Daemon) Start(t testing.TB, args ...string) { t.Helper() if err := d.StartWithError(args...); err != nil { d.DumpStackAndQuit() // in case the daemon is stuck t.Fatalf("[%s] failed to start daemon with arguments %v : %v", d.id, d.args, err) } } // StartWithError starts the daemon and return once it is ready to receive requests. // It returns an error in case it couldn't start. func (d *Daemon) StartWithError(args ...string) error { logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { return errors.Wrapf(err, "[%s] failed to create logfile", d.id) } return d.StartWithLogFile(logFile, args...) } // StartWithLogFile will start the daemon and attach its streams to a given file. 
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { d.handleUserns() dockerdBinary, err := d.BinaryPath() if err != nil { return err } if d.pidFile == "" { d.pidFile = filepath.Join(d.Folder, "docker.pid") } d.args = []string{} if d.rootlessUser != nil { if d.dockerdBinary != defaultDockerdBinary { return errors.Errorf("[%s] DOCKER_ROOTLESS doesn't support non-default dockerd binary path %q", d.id, d.dockerdBinary) } dockerdBinary = "sudo" d.args = append(d.args, "-u", d.rootlessUser.Username, "-E", "XDG_RUNTIME_DIR="+d.rootlessXDGRuntimeDir, "-E", "HOME="+d.rootlessUser.HomeDir, "-E", "PATH="+os.Getenv("PATH"), "--", defaultDockerdRootlessBinary, ) } d.args = append(d.args, "--data-root", d.Root, "--exec-root", d.execRoot, "--pidfile", d.pidFile, fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), "--containerd-namespace", d.id, "--containerd-plugins-namespace", d.id+"p", ) if d.containerdSocket != "" { d.args = append(d.args, "--containerd", d.containerdSocket) } if d.defaultCgroupNamespaceMode != "" { d.args = append(d.args, "--default-cgroupns-mode", d.defaultCgroupNamespaceMode) } if d.experimental { d.args = append(d.args, "--experimental") } if d.init { d.args = append(d.args, "--init") } if !(d.UseDefaultHost || d.UseDefaultTLSHost) { d.args = append(d.args, "--host", d.Sock()) } if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.args = append(d.args, "--userns-remap", root) } // If we don't explicitly set the log-level or debug flag(-D) then // turn on debug mode foundLog := false foundSd := false for _, a := range providedArgs { if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { foundLog = true } if strings.Contains(a, "--storage-driver") { foundSd = true } } if !foundLog { d.args = append(d.args, "--debug") } if d.storageDriver != "" && !foundSd { d.args = append(d.args, "--storage-driver", d.storageDriver) } d.args = append(d.args, providedArgs...) d.cmd = exec.Command(dockerdBinary, d.args...) d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") d.cmd.Stdout = out d.cmd.Stderr = out d.logFile = out if d.rootlessUser != nil { // sudo requires this for propagating signals setsid(d.cmd) } if err := d.cmd.Start(); err != nil { return errors.Wrapf(err, "[%s] could not start daemon container", d.id) } wait := make(chan error, 1) go func() { ret := d.cmd.Wait() d.log.Logf("[%s] exiting daemon", d.id) // If we send before logging, we might accidentally log _after_ the test is done. // As of Go 1.12, this incurs a panic instead of silently being dropped. 
wait <- ret close(wait) }() d.Wait = wait clientConfig, err := d.getClientConfig() if err != nil { return err } client := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/_ping", nil) if err != nil { return errors.Wrapf(err, "[%s] could not create new request", d.id) } req.URL.Host = clientConfig.addr req.URL.Scheme = clientConfig.scheme ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() // make sure daemon is ready to receive requests for i := 0; ; i++ { d.log.Logf("[%s] waiting for daemon to start", d.id) select { case <-ctx.Done(): return errors.Wrapf(ctx.Err(), "[%s] daemon exited and never started", d.id) case err := <-d.Wait: return errors.Wrapf(err, "[%s] daemon exited during startup", d.id) default: rctx, rcancel := context.WithTimeout(context.TODO(), 2*time.Second) defer rcancel() resp, err := client.Do(req.WithContext(rctx)) if err != nil { if i > 2 { // don't log the first couple, this ends up just being noise d.log.Logf("[%s] error pinging daemon on start: %v", d.id, err) } select { case <-ctx.Done(): case <-time.After(500 * time.Millisecond): } continue } resp.Body.Close() if resp.StatusCode != http.StatusOK { d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) } d.log.Logf("[%s] daemon started\n", d.id) d.Root, err = d.queryRootDir() if err != nil { return errors.Wrapf(err, "[%s] error querying daemon for root directory", d.id) } return nil } } } // StartWithBusybox will first start the daemon with Daemon.Start() // then save the busybox image from the main daemon and load it into this Daemon instance. func (d *Daemon) StartWithBusybox(t testing.TB, arg ...string) { t.Helper() d.Start(t, arg...) d.LoadBusybox(t) } // Kill will send a SIGKILL to the daemon func (d *Daemon) Kill() error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { d.logFile.Close() d.cmd = nil }() if err := d.cmd.Process.Kill(); err != nil { return err } if d.pidFile != "" { _ = os.Remove(d.pidFile) } return nil } // Pid returns the pid of the daemon func (d *Daemon) Pid() int { return d.cmd.Process.Pid } // Interrupt stops the daemon by sending it an Interrupt signal func (d *Daemon) Interrupt() error { return d.Signal(os.Interrupt) } // Signal sends the specified signal to the daemon if running func (d *Daemon) Signal(signal os.Signal) error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } return d.cmd.Process.Signal(signal) } // DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its // stack to its log file and exit // This is used primarily for gathering debug information on test timeout func (d *Daemon) DumpStackAndQuit() { if d.cmd == nil || d.cmd.Process == nil { return } SignalDaemonDump(d.cmd.Process.Pid) } // Stop will send a SIGINT every second and wait for the daemon to stop. // If it times out, a SIGKILL is sent. // Stop will not delete the daemon directory. If a purged daemon is needed, // instantiate a new one with NewDaemon. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Stop(t testing.TB) { t.Helper() err := d.StopWithError() if err != nil { if err != errDaemonNotStarted { t.Fatalf("[%s] error while stopping the daemon: %v", d.id, err) } else { t.Logf("[%s] daemon is not started", d.id) } } } // StopWithError will send a SIGINT every second and wait for the daemon to stop. // If it timeouts, a SIGKILL is sent. // Stop will not delete the daemon directory. 
If a purged daemon is needed, // instantiate a new one with NewDaemon. func (d *Daemon) StopWithError() (err error) { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { if err != nil { d.log.Logf("[%s] error while stopping daemon: %v", d.id, err) } else { d.log.Logf("[%s] daemon stopped", d.id) if d.pidFile != "" { _ = os.Remove(d.pidFile) } } if err := d.logFile.Close(); err != nil { d.log.Logf("[%s] failed to close daemon logfile: %v", d.id, err) } d.cmd = nil }() i := 1 ticker := time.NewTicker(time.Second) defer ticker.Stop() tick := ticker.C d.log.Logf("[%s] stopping daemon", d.id) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { if strings.Contains(err.Error(), "os: process already finished") { return errDaemonNotStarted } return errors.Wrapf(err, "[%s] could not send signal", d.id) } out1: for { select { case err := <-d.Wait: return err case <-time.After(20 * time.Second): // time for stopping jobs and run onShutdown hooks d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id) break out1 } } out2: for { select { case err := <-d.Wait: return err case <-tick: i++ if i > 5 { d.log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i) break out2 } d.log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i) } } } if err := d.cmd.Process.Kill(); err != nil { d.log.Logf("[%s] failed to kill daemon: %v", d.id, err) return err } return nil } // Restart will restart the daemon by first stopping it and the starting it. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Restart(t testing.TB, args ...string) { t.Helper() d.Stop(t) d.Start(t, args...) } // RestartWithError will restart the daemon by first stopping it and then starting it. func (d *Daemon) RestartWithError(arg ...string) error { if err := d.StopWithError(); err != nil { return err } return d.StartWithError(arg...) 
} func (d *Daemon) handleUserns() { // in the case of tests running a user namespace-enabled daemon, we have resolved // d.Root to be the actual final path of the graph dir after the "uid.gid" of // remapped root is added--we need to subtract it from the path before calling // start or else we will continue making subdirectories rather than truly restarting // with the same location/root: if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.Root = filepath.Dir(d.Root) } } // ReloadConfig asks the daemon to reload its configuration func (d *Daemon) ReloadConfig() error { if d.cmd == nil || d.cmd.Process == nil { return errors.New("daemon is not running") } errCh := make(chan error, 1) started := make(chan struct{}) go func() { _, body, err := request.Get("/events", request.Host(d.Sock())) close(started) if err != nil { errCh <- err return } defer body.Close() dec := json.NewDecoder(body) for { var e events.Message if err := dec.Decode(&e); err != nil { errCh <- err return } if e.Type != events.DaemonEventType { continue } if e.Action != "reload" { continue } close(errCh) // notify that we are done return } }() <-started if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { return errors.Wrapf(err, "[%s] error signaling daemon reload", d.id) } select { case err := <-errCh: if err != nil { return errors.Wrapf(err, "[%s] error waiting for daemon reload event", d.id) } case <-time.After(30 * time.Second): return errors.Errorf("[%s] daemon reload event timed out after 30 seconds", d.id) } return nil } // LoadBusybox image into the daemon func (d *Daemon) LoadBusybox(t testing.TB) { t.Helper() clientHost, err := client.NewClientWithOpts(client.FromEnv) assert.NilError(t, err, "[%s] failed to create client", d.id) defer clientHost.Close() ctx := context.Background() reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) assert.NilError(t, err, "[%s] failed to download busybox", d.id) defer reader.Close() c := d.NewClientT(t) defer c.Close() resp, err := c.ImageLoad(ctx, reader, true) assert.NilError(t, err, "[%s] failed to load busybox", d.id) defer resp.Body.Close() } func (d *Daemon) getClientConfig() (*clientConfig, error) { var ( transport *http.Transport scheme string addr string proto string ) if d.UseDefaultTLSHost { option := &tlsconfig.Options{ CAFile: "fixtures/https/ca.pem", CertFile: "fixtures/https/client-cert.pem", KeyFile: "fixtures/https/client-key.pem", } tlsConfig, err := tlsconfig.Client(*option) if err != nil { return nil, err } transport = &http.Transport{ TLSClientConfig: tlsConfig, } addr = defaultTLSHost scheme = "https" proto = "tcp" } else if d.UseDefaultHost { addr = defaultUnixSocket proto = "unix" scheme = "http" transport = &http.Transport{} } else { addr = d.sockPath() proto = "unix" scheme = "http" transport = &http.Transport{} } if err := sockets.ConfigureTransport(transport, proto, addr); err != nil { return nil, err } transport.DisableKeepAlives = true if proto == "unix" { addr = filepath.Base(addr) } return &clientConfig{ transport: transport, scheme: scheme, addr: addr, }, nil } func (d *Daemon) queryRootDir() (string, error) { // update daemon root by asking /info endpoint (to support user // namespaced daemon with root remapped uid.gid directory) clientConfig, err := d.getClientConfig() if err != nil { return "", err } c := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/info", nil) if err != nil { return "", err } req.Header.Set("Content-Type", "application/json") req.URL.Host = 
clientConfig.addr req.URL.Scheme = clientConfig.scheme resp, err := c.Do(req) if err != nil { return "", err } body := ioutils.NewReadCloserWrapper(resp.Body, func() error { return resp.Body.Close() }) type Info struct { DockerRootDir string } var b []byte var i Info b, err = request.ReadBody(body) if err == nil && resp.StatusCode == http.StatusOK { // read the docker root dir if err = json.Unmarshal(b, &i); err == nil { return i.DockerRootDir, nil } } return "", err } // Info returns the info struct for this daemon func (d *Daemon) Info(t testing.TB) types.Info { t.Helper() c := d.NewClientT(t) info, err := c.Info(context.Background()) assert.NilError(t, err) assert.NilError(t, c.Close()) return info } // cleanupRaftDir removes swarmkit wal files if present func cleanupRaftDir(t testing.TB, d *Daemon) { t.Helper() for _, p := range []string{"wal", "wal-v3-encrypted", "snap-v3-encrypted"} { dir := filepath.Join(d.Root, "swarm/raft", p) if err := os.RemoveAll(dir); err != nil { t.Logf("[%s] error removing %v: %v", d.id, dir, err) } } }
package daemon // import "github.com/docker/docker/testutil/daemon" import ( "context" "encoding/json" "fmt" "net/http" "os" "os/exec" "os/user" "path/filepath" "strconv" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/client" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/testutil/request" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" "gotest.tools/v3/assert" ) // LogT is the subset of the testing.TB interface used by the daemon. type LogT interface { Logf(string, ...interface{}) } // nopLog is a no-op implementation of LogT that is used in daemons created by // NewDaemon (where no testing.TB is available). type nopLog struct{} func (nopLog) Logf(string, ...interface{}) {} const ( defaultDockerdBinary = "dockerd" defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock" defaultDockerdRootlessBinary = "dockerd-rootless.sh" defaultUnixSocket = "/var/run/docker.sock" defaultTLSHost = "localhost:2376" ) var errDaemonNotStarted = errors.New("daemon not started") // SockRoot holds the path of the default docker integration daemon socket var SockRoot = filepath.Join(os.TempDir(), "docker-integration") type clientConfig struct { transport *http.Transport scheme string addr string } // Daemon represents a Docker daemon for the testing framework type Daemon struct { Root string Folder string Wait chan error UseDefaultHost bool UseDefaultTLSHost bool id string logFile *os.File cmd *exec.Cmd storageDriver string userlandProxy bool defaultCgroupNamespaceMode string execRoot string experimental bool init bool dockerdBinary string log LogT pidFile string args []string containerdSocket string rootlessUser *user.User rootlessXDGRuntimeDir string // swarm related field swarmListenAddr string SwarmPort int // FIXME(vdemeester) should probably not be exported DefaultAddrPool []string SubnetSize uint32 DataPathPort uint32 OOMScoreAdjust int // cached information CachedInfo types.Info } // NewDaemon returns a Daemon instance to be used for testing. // The daemon will not automatically start. // The daemon will modify and create files under workingDir. 
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) { storageDriver := os.Getenv("DOCKER_GRAPHDRIVER") if err := os.MkdirAll(SockRoot, 0700); err != nil { return nil, errors.Wrapf(err, "failed to create daemon socket root %q", SockRoot) } id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) dir := filepath.Join(workingDir, id) daemonFolder, err := filepath.Abs(dir) if err != nil { return nil, err } daemonRoot := filepath.Join(daemonFolder, "root") if err := os.MkdirAll(daemonRoot, 0755); err != nil { return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot) } userlandProxy := true if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err != nil { userlandProxy = val } } d := &Daemon{ id: id, Folder: daemonFolder, Root: daemonRoot, storageDriver: storageDriver, userlandProxy: userlandProxy, // dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation) execRoot: filepath.Join(os.TempDir(), "dxr", id), dockerdBinary: defaultDockerdBinary, swarmListenAddr: defaultSwarmListenAddr, SwarmPort: DefaultSwarmPort, log: nopLog{}, containerdSocket: defaultContainerdSocket, } for _, op := range ops { op(d) } if d.rootlessUser != nil { if err := os.Chmod(SockRoot, 0777); err != nil { return nil, err } uid, err := strconv.Atoi(d.rootlessUser.Uid) if err != nil { return nil, err } gid, err := strconv.Atoi(d.rootlessUser.Gid) if err != nil { return nil, err } if err := os.Chown(d.Folder, uid, gid); err != nil { return nil, err } if err := os.Chown(d.Root, uid, gid); err != nil { return nil, err } if err := os.MkdirAll(filepath.Dir(d.execRoot), 0700); err != nil { return nil, err } if err := os.Chown(filepath.Dir(d.execRoot), uid, gid); err != nil { return nil, err } if err := os.MkdirAll(d.execRoot, 0700); err != nil { return nil, err } if err := os.Chown(d.execRoot, uid, gid); err != nil { return nil, err } d.rootlessXDGRuntimeDir = filepath.Join(d.Folder, "xdgrun") if err := os.MkdirAll(d.rootlessXDGRuntimeDir, 0700); err != nil { return nil, err } if err := os.Chown(d.rootlessXDGRuntimeDir, uid, gid); err != nil { return nil, err } d.containerdSocket = "" } return d, nil } // New returns a Daemon instance to be used for testing. // This will create a directory such as d123456789 in the folder specified by // $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. // The daemon will not automatically start. func New(t testing.TB, ops ...Option) *Daemon { t.Helper() dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } dest = filepath.Join(dest, t.Name()) assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable") if os.Getenv("DOCKER_ROOTLESS") != "" { if os.Getenv("DOCKER_REMAP_ROOT") != "" { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_REMAP_ROOT currently") } if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err == nil && !val { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_USERLANDPROXY=false") } } ops = append(ops, WithRootlessUser("unprivilegeduser")) } ops = append(ops, WithOOMScoreAdjust(-500)) d, err := NewDaemon(dest, ops...) assert.NilError(t, err, "could not create daemon at %q", dest) if d.rootlessUser != nil && d.dockerdBinary != defaultDockerdBinary { t.Skipf("DOCKER_ROOTLESS doesn't support specifying non-default dockerd binary path %q", d.dockerdBinary) } return d } // BinaryPath returns the binary and its arguments. 
func (d *Daemon) BinaryPath() (string, error) { dockerdBinary, err := exec.LookPath(d.dockerdBinary) if err != nil { return "", errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) } return dockerdBinary, nil } // ContainersNamespace returns the containerd namespace used for containers. func (d *Daemon) ContainersNamespace() string { return d.id } // RootDir returns the root directory of the daemon. func (d *Daemon) RootDir() string { return d.Root } // ID returns the generated id of the daemon func (d *Daemon) ID() string { return d.id } // StorageDriver returns the configured storage driver of the daemon func (d *Daemon) StorageDriver() string { return d.storageDriver } // Sock returns the socket path of the daemon func (d *Daemon) Sock() string { return fmt.Sprintf("unix://" + d.sockPath()) } func (d *Daemon) sockPath() string { return filepath.Join(SockRoot, d.id+".sock") } // LogFileName returns the path the daemon's log file func (d *Daemon) LogFileName() string { return d.logFile.Name() } // ReadLogFile returns the content of the daemon log file func (d *Daemon) ReadLogFile() ([]byte, error) { _ = d.logFile.Sync() return os.ReadFile(d.logFile.Name()) } // NewClientT creates new client based on daemon's socket path func (d *Daemon) NewClientT(t testing.TB, extraOpts ...client.Opt) *client.Client { t.Helper() c, err := d.NewClient(extraOpts...) assert.NilError(t, err, "[%s] could not create daemon client", d.id) return c } // NewClient creates new client based on daemon's socket path func (d *Daemon) NewClient(extraOpts ...client.Opt) (*client.Client, error) { clientOpts := []client.Opt{ client.FromEnv, client.WithHost(d.Sock()), } clientOpts = append(clientOpts, extraOpts...) return client.NewClientWithOpts(clientOpts...) } // Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files func (d *Daemon) Cleanup(t testing.TB) { t.Helper() cleanupMount(t, d) cleanupRaftDir(t, d) cleanupNetworkNamespace(t, d) } // Start starts the daemon and return once it is ready to receive requests. func (d *Daemon) Start(t testing.TB, args ...string) { t.Helper() if err := d.StartWithError(args...); err != nil { d.DumpStackAndQuit() // in case the daemon is stuck t.Fatalf("[%s] failed to start daemon with arguments %v : %v", d.id, d.args, err) } } // StartWithError starts the daemon and return once it is ready to receive requests. // It returns an error in case it couldn't start. func (d *Daemon) StartWithError(args ...string) error { logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { return errors.Wrapf(err, "[%s] failed to create logfile", d.id) } return d.StartWithLogFile(logFile, args...) } // StartWithLogFile will start the daemon and attach its streams to a given file. 
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { d.handleUserns() dockerdBinary, err := d.BinaryPath() if err != nil { return err } if d.pidFile == "" { d.pidFile = filepath.Join(d.Folder, "docker.pid") } d.args = []string{} if d.rootlessUser != nil { if d.dockerdBinary != defaultDockerdBinary { return errors.Errorf("[%s] DOCKER_ROOTLESS doesn't support non-default dockerd binary path %q", d.id, d.dockerdBinary) } dockerdBinary = "sudo" d.args = append(d.args, "-u", d.rootlessUser.Username, "-E", "XDG_RUNTIME_DIR="+d.rootlessXDGRuntimeDir, "-E", "HOME="+d.rootlessUser.HomeDir, "-E", "PATH="+os.Getenv("PATH"), "--", defaultDockerdRootlessBinary, ) } d.args = append(d.args, "--data-root", d.Root, "--exec-root", d.execRoot, "--pidfile", d.pidFile, fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), "--containerd-namespace", d.id, "--containerd-plugins-namespace", d.id+"p", ) if d.containerdSocket != "" { d.args = append(d.args, "--containerd", d.containerdSocket) } if d.defaultCgroupNamespaceMode != "" { d.args = append(d.args, "--default-cgroupns-mode", d.defaultCgroupNamespaceMode) } if d.experimental { d.args = append(d.args, "--experimental") } if d.init { d.args = append(d.args, "--init") } if !(d.UseDefaultHost || d.UseDefaultTLSHost) { d.args = append(d.args, "--host", d.Sock()) } if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.args = append(d.args, "--userns-remap", root) } // If we don't explicitly set the log-level or debug flag(-D) then // turn on debug mode foundLog := false foundSd := false for _, a := range providedArgs { if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { foundLog = true } if strings.Contains(a, "--storage-driver") { foundSd = true } } if !foundLog { d.args = append(d.args, "--debug") } if d.storageDriver != "" && !foundSd { d.args = append(d.args, "--storage-driver", d.storageDriver) } d.args = append(d.args, providedArgs...) d.cmd = exec.Command(dockerdBinary, d.args...) d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") d.cmd.Stdout = out d.cmd.Stderr = out d.logFile = out if d.rootlessUser != nil { // sudo requires this for propagating signals setsid(d.cmd) } if err := d.cmd.Start(); err != nil { return errors.Wrapf(err, "[%s] could not start daemon container", d.id) } wait := make(chan error, 1) go func() { ret := d.cmd.Wait() d.log.Logf("[%s] exiting daemon", d.id) // If we send before logging, we might accidentally log _after_ the test is done. // As of Go 1.12, this incurs a panic instead of silently being dropped. 
wait <- ret close(wait) }() d.Wait = wait clientConfig, err := d.getClientConfig() if err != nil { return err } client := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/_ping", nil) if err != nil { return errors.Wrapf(err, "[%s] could not create new request", d.id) } req.URL.Host = clientConfig.addr req.URL.Scheme = clientConfig.scheme ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() // make sure daemon is ready to receive requests for i := 0; ; i++ { d.log.Logf("[%s] waiting for daemon to start", d.id) select { case <-ctx.Done(): return errors.Wrapf(ctx.Err(), "[%s] daemon exited and never started", d.id) case err := <-d.Wait: return errors.Wrapf(err, "[%s] daemon exited during startup", d.id) default: rctx, rcancel := context.WithTimeout(context.TODO(), 2*time.Second) defer rcancel() resp, err := client.Do(req.WithContext(rctx)) if err != nil { if i > 2 { // don't log the first couple, this ends up just being noise d.log.Logf("[%s] error pinging daemon on start: %v", d.id, err) } select { case <-ctx.Done(): case <-time.After(500 * time.Millisecond): } continue } resp.Body.Close() if resp.StatusCode != http.StatusOK { d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) } d.log.Logf("[%s] daemon started\n", d.id) d.Root, err = d.queryRootDir() if err != nil { return errors.Wrapf(err, "[%s] error querying daemon for root directory", d.id) } return nil } } } // StartWithBusybox will first start the daemon with Daemon.Start() // then save the busybox image from the main daemon and load it into this Daemon instance. func (d *Daemon) StartWithBusybox(t testing.TB, arg ...string) { t.Helper() d.Start(t, arg...) d.LoadBusybox(t) } // Kill will send a SIGKILL to the daemon func (d *Daemon) Kill() error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { d.logFile.Close() d.cmd = nil }() if err := d.cmd.Process.Kill(); err != nil { return err } if d.pidFile != "" { _ = os.Remove(d.pidFile) } return nil } // Pid returns the pid of the daemon func (d *Daemon) Pid() int { return d.cmd.Process.Pid } // Interrupt stops the daemon by sending it an Interrupt signal func (d *Daemon) Interrupt() error { return d.Signal(os.Interrupt) } // Signal sends the specified signal to the daemon if running func (d *Daemon) Signal(signal os.Signal) error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } return d.cmd.Process.Signal(signal) } // DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its // stack to its log file and exit // This is used primarily for gathering debug information on test timeout func (d *Daemon) DumpStackAndQuit() { if d.cmd == nil || d.cmd.Process == nil { return } SignalDaemonDump(d.cmd.Process.Pid) } // Stop will send a SIGINT every second and wait for the daemon to stop. // If it times out, a SIGKILL is sent. // Stop will not delete the daemon directory. If a purged daemon is needed, // instantiate a new one with NewDaemon. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Stop(t testing.TB) { t.Helper() err := d.StopWithError() if err != nil { if err != errDaemonNotStarted { t.Fatalf("[%s] error while stopping the daemon: %v", d.id, err) } else { t.Logf("[%s] daemon is not started", d.id) } } } // StopWithError will send a SIGINT every second and wait for the daemon to stop. // If it timeouts, a SIGKILL is sent. // Stop will not delete the daemon directory. 
If a purged daemon is needed, // instantiate a new one with NewDaemon. func (d *Daemon) StopWithError() (err error) { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { if err != nil { d.log.Logf("[%s] error while stopping daemon: %v", d.id, err) } else { d.log.Logf("[%s] daemon stopped", d.id) if d.pidFile != "" { _ = os.Remove(d.pidFile) } } if err := d.logFile.Close(); err != nil { d.log.Logf("[%s] failed to close daemon logfile: %v", d.id, err) } d.cmd = nil }() i := 1 ticker := time.NewTicker(time.Second) defer ticker.Stop() tick := ticker.C d.log.Logf("[%s] stopping daemon", d.id) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { if strings.Contains(err.Error(), "os: process already finished") { return errDaemonNotStarted } return errors.Wrapf(err, "[%s] could not send signal", d.id) } out1: for { select { case err := <-d.Wait: return err case <-time.After(20 * time.Second): // time for stopping jobs and run onShutdown hooks d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id) break out1 } } out2: for { select { case err := <-d.Wait: return err case <-tick: i++ if i > 5 { d.log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i) break out2 } d.log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i) } } } if err := d.cmd.Process.Kill(); err != nil { d.log.Logf("[%s] failed to kill daemon: %v", d.id, err) return err } return nil } // Restart will restart the daemon by first stopping it and the starting it. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Restart(t testing.TB, args ...string) { t.Helper() d.Stop(t) d.Start(t, args...) } // RestartWithError will restart the daemon by first stopping it and then starting it. func (d *Daemon) RestartWithError(arg ...string) error { if err := d.StopWithError(); err != nil { return err } return d.StartWithError(arg...) 
} func (d *Daemon) handleUserns() { // in the case of tests running a user namespace-enabled daemon, we have resolved // d.Root to be the actual final path of the graph dir after the "uid.gid" of // remapped root is added--we need to subtract it from the path before calling // start or else we will continue making subdirectories rather than truly restarting // with the same location/root: if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.Root = filepath.Dir(d.Root) } } // ReloadConfig asks the daemon to reload its configuration func (d *Daemon) ReloadConfig() error { if d.cmd == nil || d.cmd.Process == nil { return errors.New("daemon is not running") } errCh := make(chan error, 1) started := make(chan struct{}) go func() { _, body, err := request.Get("/events", request.Host(d.Sock())) close(started) if err != nil { errCh <- err return } defer body.Close() dec := json.NewDecoder(body) for { var e events.Message if err := dec.Decode(&e); err != nil { errCh <- err return } if e.Type != events.DaemonEventType { continue } if e.Action != "reload" { continue } close(errCh) // notify that we are done return } }() <-started if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { return errors.Wrapf(err, "[%s] error signaling daemon reload", d.id) } select { case err := <-errCh: if err != nil { return errors.Wrapf(err, "[%s] error waiting for daemon reload event", d.id) } case <-time.After(30 * time.Second): return errors.Errorf("[%s] daemon reload event timed out after 30 seconds", d.id) } return nil } // LoadBusybox image into the daemon func (d *Daemon) LoadBusybox(t testing.TB) { t.Helper() clientHost, err := client.NewClientWithOpts(client.FromEnv) assert.NilError(t, err, "[%s] failed to create client", d.id) defer clientHost.Close() ctx := context.Background() reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) assert.NilError(t, err, "[%s] failed to download busybox", d.id) defer reader.Close() c := d.NewClientT(t) defer c.Close() resp, err := c.ImageLoad(ctx, reader, true) assert.NilError(t, err, "[%s] failed to load busybox", d.id) defer resp.Body.Close() } func (d *Daemon) getClientConfig() (*clientConfig, error) { var ( transport *http.Transport scheme string addr string proto string ) if d.UseDefaultTLSHost { option := &tlsconfig.Options{ CAFile: "fixtures/https/ca.pem", CertFile: "fixtures/https/client-cert.pem", KeyFile: "fixtures/https/client-key.pem", } tlsConfig, err := tlsconfig.Client(*option) if err != nil { return nil, err } transport = &http.Transport{ TLSClientConfig: tlsConfig, } addr = defaultTLSHost scheme = "https" proto = "tcp" } else if d.UseDefaultHost { addr = defaultUnixSocket proto = "unix" scheme = "http" transport = &http.Transport{} } else { addr = d.sockPath() proto = "unix" scheme = "http" transport = &http.Transport{} } if err := sockets.ConfigureTransport(transport, proto, addr); err != nil { return nil, err } transport.DisableKeepAlives = true if proto == "unix" { addr = filepath.Base(addr) } return &clientConfig{ transport: transport, scheme: scheme, addr: addr, }, nil } func (d *Daemon) queryRootDir() (string, error) { // update daemon root by asking /info endpoint (to support user // namespaced daemon with root remapped uid.gid directory) clientConfig, err := d.getClientConfig() if err != nil { return "", err } c := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/info", nil) if err != nil { return "", err } req.Header.Set("Content-Type", "application/json") req.URL.Host = 
clientConfig.addr req.URL.Scheme = clientConfig.scheme resp, err := c.Do(req) if err != nil { return "", err } body := ioutils.NewReadCloserWrapper(resp.Body, func() error { return resp.Body.Close() }) type Info struct { DockerRootDir string } var b []byte var i Info b, err = request.ReadBody(body) if err == nil && resp.StatusCode == http.StatusOK { // read the docker root dir if err = json.Unmarshal(b, &i); err == nil { return i.DockerRootDir, nil } } return "", err } // Info returns the info struct for this daemon func (d *Daemon) Info(t testing.TB) types.Info { t.Helper() c := d.NewClientT(t) info, err := c.Info(context.Background()) assert.NilError(t, err) assert.NilError(t, c.Close()) return info } // cleanupRaftDir removes swarmkit wal files if present func cleanupRaftDir(t testing.TB, d *Daemon) { t.Helper() for _, p := range []string{"wal", "wal-v3-encrypted", "snap-v3-encrypted"} { dir := filepath.Join(d.Root, "swarm/raft", p) if err := os.RemoveAll(dir); err != nil { t.Logf("[%s] error removing %v: %v", d.id, dir, err) } } }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
YOLO; wondering if it's just not written to disk the moment we try to read it 🤔
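That hypothesis lines up with the change visible in the after content above, where `ReadLogFile` gains a `d.logFile.Sync()` call before re-reading the file by name. A minimal standalone sketch of that Sync-before-ReadFile pattern (the temp-file name and log text are made up for the example):

```go
package main

import (
	"fmt"
	"os"
)

// Ask the OS to commit the file's contents to stable storage before
// re-reading it by path, mirroring the pattern used in the fix above.
func main() {
	f, err := os.CreateTemp("", "daemon-*.log")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.WriteString("Reloaded configuration: /tmp/daemon.json\n"); err != nil {
		panic(err)
	}

	// Flush to stable storage before a separate read of the same path.
	if err := f.Sync(); err != nil {
		panic(err)
	}

	data, err := os.ReadFile(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data))
}
```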
thaJeztah
4,469
moby/moby
42,835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448 The new format uses a `"proxies"` key that holds the proxies; ```json { "proxies": { "http-proxy": "http-config", "https-proxy": "https-config", "no-proxy": "no-proxy-config" } } ``` -------- carry of https://github.com/moby/moby/pull/42647 fixes https://github.com/moby/moby/issues/24758 closes https://github.com/moby/moby/pull/42647 addresses https://github.com/moby/moby/issues/40201 This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables). Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use. The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon. Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration). With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`) cat /etc/docker/daemon.json { "http-proxy": "http://proxytest.example.com:80", "https-proxy": "https://proxytest.example.com:443" } docker pull busybox Using default tag: latest Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host docker build . Sending build context to Docker daemon 89.28MB Step 1/3 : FROM golang:1.16-alpine AS base Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host Integration tests were added to test the behavior: - verify that the configuration through all means is used (env-var, command-line flags, daemon.json), and used in the expected order of preference. - verify that conflicting options produce an error. - verify that logs and error messages sanitise proxy URLs (as they may contain username / password) ```bash make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
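One of the test points in the description above is that logs and error messages sanitise proxy URLs. A minimal sketch of that kind of credential masking (not necessarily the daemon's implementation; the example URL and the "xxxxx" placeholder follow the style of the test expectations):

```go
package main

import (
	"fmt"
	"net/url"
)

// Mask the userinfo of a proxy URL before logging it, so usernames and
// passwords never end up in log files or error messages.
func main() {
	raw := "https://myuser:[email protected]"

	u, err := url.Parse(raw)
	if err != nil {
		fmt.Println("invalid proxy URL")
		return
	}
	if u.User != nil {
		u.User = url.UserPassword("xxxxx", "xxxxx")
	}

	fmt.Println(u.String()) // https://xxxxx:[email protected]
}
```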
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
testutil/daemon/daemon.go
package daemon // import "github.com/docker/docker/testutil/daemon" import ( "context" "encoding/json" "fmt" "net/http" "os" "os/exec" "os/user" "path/filepath" "strconv" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/client" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/testutil/request" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" "gotest.tools/v3/assert" ) // LogT is the subset of the testing.TB interface used by the daemon. type LogT interface { Logf(string, ...interface{}) } // nopLog is a no-op implementation of LogT that is used in daemons created by // NewDaemon (where no testing.TB is available). type nopLog struct{} func (nopLog) Logf(string, ...interface{}) {} const ( defaultDockerdBinary = "dockerd" defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock" defaultDockerdRootlessBinary = "dockerd-rootless.sh" defaultUnixSocket = "/var/run/docker.sock" defaultTLSHost = "localhost:2376" ) var errDaemonNotStarted = errors.New("daemon not started") // SockRoot holds the path of the default docker integration daemon socket var SockRoot = filepath.Join(os.TempDir(), "docker-integration") type clientConfig struct { transport *http.Transport scheme string addr string } // Daemon represents a Docker daemon for the testing framework type Daemon struct { Root string Folder string Wait chan error UseDefaultHost bool UseDefaultTLSHost bool id string logFile *os.File cmd *exec.Cmd storageDriver string userlandProxy bool defaultCgroupNamespaceMode string execRoot string experimental bool init bool dockerdBinary string log LogT pidFile string args []string containerdSocket string rootlessUser *user.User rootlessXDGRuntimeDir string // swarm related field swarmListenAddr string SwarmPort int // FIXME(vdemeester) should probably not be exported DefaultAddrPool []string SubnetSize uint32 DataPathPort uint32 OOMScoreAdjust int // cached information CachedInfo types.Info } // NewDaemon returns a Daemon instance to be used for testing. // The daemon will not automatically start. // The daemon will modify and create files under workingDir. 
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) { storageDriver := os.Getenv("DOCKER_GRAPHDRIVER") if err := os.MkdirAll(SockRoot, 0700); err != nil { return nil, errors.Wrapf(err, "failed to create daemon socket root %q", SockRoot) } id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) dir := filepath.Join(workingDir, id) daemonFolder, err := filepath.Abs(dir) if err != nil { return nil, err } daemonRoot := filepath.Join(daemonFolder, "root") if err := os.MkdirAll(daemonRoot, 0755); err != nil { return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot) } userlandProxy := true if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err != nil { userlandProxy = val } } d := &Daemon{ id: id, Folder: daemonFolder, Root: daemonRoot, storageDriver: storageDriver, userlandProxy: userlandProxy, // dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation) execRoot: filepath.Join(os.TempDir(), "dxr", id), dockerdBinary: defaultDockerdBinary, swarmListenAddr: defaultSwarmListenAddr, SwarmPort: DefaultSwarmPort, log: nopLog{}, containerdSocket: defaultContainerdSocket, } for _, op := range ops { op(d) } if d.rootlessUser != nil { if err := os.Chmod(SockRoot, 0777); err != nil { return nil, err } uid, err := strconv.Atoi(d.rootlessUser.Uid) if err != nil { return nil, err } gid, err := strconv.Atoi(d.rootlessUser.Gid) if err != nil { return nil, err } if err := os.Chown(d.Folder, uid, gid); err != nil { return nil, err } if err := os.Chown(d.Root, uid, gid); err != nil { return nil, err } if err := os.MkdirAll(filepath.Dir(d.execRoot), 0700); err != nil { return nil, err } if err := os.Chown(filepath.Dir(d.execRoot), uid, gid); err != nil { return nil, err } if err := os.MkdirAll(d.execRoot, 0700); err != nil { return nil, err } if err := os.Chown(d.execRoot, uid, gid); err != nil { return nil, err } d.rootlessXDGRuntimeDir = filepath.Join(d.Folder, "xdgrun") if err := os.MkdirAll(d.rootlessXDGRuntimeDir, 0700); err != nil { return nil, err } if err := os.Chown(d.rootlessXDGRuntimeDir, uid, gid); err != nil { return nil, err } d.containerdSocket = "" } return d, nil } // New returns a Daemon instance to be used for testing. // This will create a directory such as d123456789 in the folder specified by // $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. // The daemon will not automatically start. func New(t testing.TB, ops ...Option) *Daemon { t.Helper() dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } dest = filepath.Join(dest, t.Name()) assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable") if os.Getenv("DOCKER_ROOTLESS") != "" { if os.Getenv("DOCKER_REMAP_ROOT") != "" { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_REMAP_ROOT currently") } if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err == nil && !val { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_USERLANDPROXY=false") } } ops = append(ops, WithRootlessUser("unprivilegeduser")) } ops = append(ops, WithOOMScoreAdjust(-500)) d, err := NewDaemon(dest, ops...) assert.NilError(t, err, "could not create daemon at %q", dest) if d.rootlessUser != nil && d.dockerdBinary != defaultDockerdBinary { t.Skipf("DOCKER_ROOTLESS doesn't support specifying non-default dockerd binary path %q", d.dockerdBinary) } return d } // BinaryPath returns the binary and its arguments. 
func (d *Daemon) BinaryPath() (string, error) { dockerdBinary, err := exec.LookPath(d.dockerdBinary) if err != nil { return "", errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) } return dockerdBinary, nil } // ContainersNamespace returns the containerd namespace used for containers. func (d *Daemon) ContainersNamespace() string { return d.id } // RootDir returns the root directory of the daemon. func (d *Daemon) RootDir() string { return d.Root } // ID returns the generated id of the daemon func (d *Daemon) ID() string { return d.id } // StorageDriver returns the configured storage driver of the daemon func (d *Daemon) StorageDriver() string { return d.storageDriver } // Sock returns the socket path of the daemon func (d *Daemon) Sock() string { return fmt.Sprintf("unix://" + d.sockPath()) } func (d *Daemon) sockPath() string { return filepath.Join(SockRoot, d.id+".sock") } // LogFileName returns the path the daemon's log file func (d *Daemon) LogFileName() string { return d.logFile.Name() } // ReadLogFile returns the content of the daemon log file func (d *Daemon) ReadLogFile() ([]byte, error) { return os.ReadFile(d.logFile.Name()) } // NewClientT creates new client based on daemon's socket path func (d *Daemon) NewClientT(t testing.TB, extraOpts ...client.Opt) *client.Client { t.Helper() c, err := d.NewClient(extraOpts...) assert.NilError(t, err, "[%s] could not create daemon client", d.id) return c } // NewClient creates new client based on daemon's socket path func (d *Daemon) NewClient(extraOpts ...client.Opt) (*client.Client, error) { clientOpts := []client.Opt{ client.FromEnv, client.WithHost(d.Sock()), } clientOpts = append(clientOpts, extraOpts...) return client.NewClientWithOpts(clientOpts...) } // Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files func (d *Daemon) Cleanup(t testing.TB) { t.Helper() cleanupMount(t, d) cleanupRaftDir(t, d) cleanupNetworkNamespace(t, d) } // Start starts the daemon and return once it is ready to receive requests. func (d *Daemon) Start(t testing.TB, args ...string) { t.Helper() if err := d.StartWithError(args...); err != nil { d.DumpStackAndQuit() // in case the daemon is stuck t.Fatalf("[%s] failed to start daemon with arguments %v : %v", d.id, d.args, err) } } // StartWithError starts the daemon and return once it is ready to receive requests. // It returns an error in case it couldn't start. func (d *Daemon) StartWithError(args ...string) error { logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { return errors.Wrapf(err, "[%s] failed to create logfile", d.id) } return d.StartWithLogFile(logFile, args...) } // StartWithLogFile will start the daemon and attach its streams to a given file. 
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { d.handleUserns() dockerdBinary, err := d.BinaryPath() if err != nil { return err } if d.pidFile == "" { d.pidFile = filepath.Join(d.Folder, "docker.pid") } d.args = []string{} if d.rootlessUser != nil { if d.dockerdBinary != defaultDockerdBinary { return errors.Errorf("[%s] DOCKER_ROOTLESS doesn't support non-default dockerd binary path %q", d.id, d.dockerdBinary) } dockerdBinary = "sudo" d.args = append(d.args, "-u", d.rootlessUser.Username, "-E", "XDG_RUNTIME_DIR="+d.rootlessXDGRuntimeDir, "-E", "HOME="+d.rootlessUser.HomeDir, "-E", "PATH="+os.Getenv("PATH"), "--", defaultDockerdRootlessBinary, ) } d.args = append(d.args, "--data-root", d.Root, "--exec-root", d.execRoot, "--pidfile", d.pidFile, fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), "--containerd-namespace", d.id, "--containerd-plugins-namespace", d.id+"p", ) if d.containerdSocket != "" { d.args = append(d.args, "--containerd", d.containerdSocket) } if d.defaultCgroupNamespaceMode != "" { d.args = append(d.args, "--default-cgroupns-mode", d.defaultCgroupNamespaceMode) } if d.experimental { d.args = append(d.args, "--experimental") } if d.init { d.args = append(d.args, "--init") } if !(d.UseDefaultHost || d.UseDefaultTLSHost) { d.args = append(d.args, "--host", d.Sock()) } if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.args = append(d.args, "--userns-remap", root) } // If we don't explicitly set the log-level or debug flag(-D) then // turn on debug mode foundLog := false foundSd := false for _, a := range providedArgs { if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { foundLog = true } if strings.Contains(a, "--storage-driver") { foundSd = true } } if !foundLog { d.args = append(d.args, "--debug") } if d.storageDriver != "" && !foundSd { d.args = append(d.args, "--storage-driver", d.storageDriver) } d.args = append(d.args, providedArgs...) d.cmd = exec.Command(dockerdBinary, d.args...) d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") d.cmd.Stdout = out d.cmd.Stderr = out d.logFile = out if d.rootlessUser != nil { // sudo requires this for propagating signals setsid(d.cmd) } if err := d.cmd.Start(); err != nil { return errors.Wrapf(err, "[%s] could not start daemon container", d.id) } wait := make(chan error, 1) go func() { ret := d.cmd.Wait() d.log.Logf("[%s] exiting daemon", d.id) // If we send before logging, we might accidentally log _after_ the test is done. // As of Go 1.12, this incurs a panic instead of silently being dropped. 
wait <- ret close(wait) }() d.Wait = wait clientConfig, err := d.getClientConfig() if err != nil { return err } client := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/_ping", nil) if err != nil { return errors.Wrapf(err, "[%s] could not create new request", d.id) } req.URL.Host = clientConfig.addr req.URL.Scheme = clientConfig.scheme ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() // make sure daemon is ready to receive requests for i := 0; ; i++ { d.log.Logf("[%s] waiting for daemon to start", d.id) select { case <-ctx.Done(): return errors.Wrapf(ctx.Err(), "[%s] daemon exited and never started", d.id) case err := <-d.Wait: return errors.Wrapf(err, "[%s] daemon exited during startup", d.id) default: rctx, rcancel := context.WithTimeout(context.TODO(), 2*time.Second) defer rcancel() resp, err := client.Do(req.WithContext(rctx)) if err != nil { if i > 2 { // don't log the first couple, this ends up just being noise d.log.Logf("[%s] error pinging daemon on start: %v", d.id, err) } select { case <-ctx.Done(): case <-time.After(500 * time.Millisecond): } continue } resp.Body.Close() if resp.StatusCode != http.StatusOK { d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) } d.log.Logf("[%s] daemon started\n", d.id) d.Root, err = d.queryRootDir() if err != nil { return errors.Wrapf(err, "[%s] error querying daemon for root directory", d.id) } return nil } } } // StartWithBusybox will first start the daemon with Daemon.Start() // then save the busybox image from the main daemon and load it into this Daemon instance. func (d *Daemon) StartWithBusybox(t testing.TB, arg ...string) { t.Helper() d.Start(t, arg...) d.LoadBusybox(t) } // Kill will send a SIGKILL to the daemon func (d *Daemon) Kill() error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { d.logFile.Close() d.cmd = nil }() if err := d.cmd.Process.Kill(); err != nil { return err } if d.pidFile != "" { _ = os.Remove(d.pidFile) } return nil } // Pid returns the pid of the daemon func (d *Daemon) Pid() int { return d.cmd.Process.Pid } // Interrupt stops the daemon by sending it an Interrupt signal func (d *Daemon) Interrupt() error { return d.Signal(os.Interrupt) } // Signal sends the specified signal to the daemon if running func (d *Daemon) Signal(signal os.Signal) error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } return d.cmd.Process.Signal(signal) } // DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its // stack to its log file and exit // This is used primarily for gathering debug information on test timeout func (d *Daemon) DumpStackAndQuit() { if d.cmd == nil || d.cmd.Process == nil { return } SignalDaemonDump(d.cmd.Process.Pid) } // Stop will send a SIGINT every second and wait for the daemon to stop. // If it times out, a SIGKILL is sent. // Stop will not delete the daemon directory. If a purged daemon is needed, // instantiate a new one with NewDaemon. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Stop(t testing.TB) { t.Helper() err := d.StopWithError() if err != nil { if err != errDaemonNotStarted { t.Fatalf("[%s] error while stopping the daemon: %v", d.id, err) } else { t.Logf("[%s] daemon is not started", d.id) } } } // StopWithError will send a SIGINT every second and wait for the daemon to stop. // If it timeouts, a SIGKILL is sent. // Stop will not delete the daemon directory. 
If a purged daemon is needed, // instantiate a new one with NewDaemon. func (d *Daemon) StopWithError() (err error) { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { if err != nil { d.log.Logf("[%s] error while stopping daemon: %v", d.id, err) } else { d.log.Logf("[%s] daemon stopped", d.id) if d.pidFile != "" { _ = os.Remove(d.pidFile) } } if err := d.logFile.Close(); err != nil { d.log.Logf("[%s] failed to close daemon logfile: %v", d.id, err) } d.cmd = nil }() i := 1 ticker := time.NewTicker(time.Second) defer ticker.Stop() tick := ticker.C d.log.Logf("[%s] stopping daemon", d.id) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { if strings.Contains(err.Error(), "os: process already finished") { return errDaemonNotStarted } return errors.Wrapf(err, "[%s] could not send signal", d.id) } out1: for { select { case err := <-d.Wait: return err case <-time.After(20 * time.Second): // time for stopping jobs and run onShutdown hooks d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id) break out1 } } out2: for { select { case err := <-d.Wait: return err case <-tick: i++ if i > 5 { d.log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i) break out2 } d.log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i) } } } if err := d.cmd.Process.Kill(); err != nil { d.log.Logf("[%s] failed to kill daemon: %v", d.id, err) return err } return nil } // Restart will restart the daemon by first stopping it and the starting it. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Restart(t testing.TB, args ...string) { t.Helper() d.Stop(t) d.Start(t, args...) } // RestartWithError will restart the daemon by first stopping it and then starting it. func (d *Daemon) RestartWithError(arg ...string) error { if err := d.StopWithError(); err != nil { return err } return d.StartWithError(arg...) 
} func (d *Daemon) handleUserns() { // in the case of tests running a user namespace-enabled daemon, we have resolved // d.Root to be the actual final path of the graph dir after the "uid.gid" of // remapped root is added--we need to subtract it from the path before calling // start or else we will continue making subdirectories rather than truly restarting // with the same location/root: if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.Root = filepath.Dir(d.Root) } } // ReloadConfig asks the daemon to reload its configuration func (d *Daemon) ReloadConfig() error { if d.cmd == nil || d.cmd.Process == nil { return errors.New("daemon is not running") } errCh := make(chan error, 1) started := make(chan struct{}) go func() { _, body, err := request.Get("/events", request.Host(d.Sock())) close(started) if err != nil { errCh <- err return } defer body.Close() dec := json.NewDecoder(body) for { var e events.Message if err := dec.Decode(&e); err != nil { errCh <- err return } if e.Type != events.DaemonEventType { continue } if e.Action != "reload" { continue } close(errCh) // notify that we are done return } }() <-started if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { return errors.Wrapf(err, "[%s] error signaling daemon reload", d.id) } select { case err := <-errCh: if err != nil { return errors.Wrapf(err, "[%s] error waiting for daemon reload event", d.id) } case <-time.After(30 * time.Second): return errors.Errorf("[%s] daemon reload event timed out after 30 seconds", d.id) } return nil } // LoadBusybox image into the daemon func (d *Daemon) LoadBusybox(t testing.TB) { t.Helper() clientHost, err := client.NewClientWithOpts(client.FromEnv) assert.NilError(t, err, "[%s] failed to create client", d.id) defer clientHost.Close() ctx := context.Background() reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) assert.NilError(t, err, "[%s] failed to download busybox", d.id) defer reader.Close() c := d.NewClientT(t) defer c.Close() resp, err := c.ImageLoad(ctx, reader, true) assert.NilError(t, err, "[%s] failed to load busybox", d.id) defer resp.Body.Close() } func (d *Daemon) getClientConfig() (*clientConfig, error) { var ( transport *http.Transport scheme string addr string proto string ) if d.UseDefaultTLSHost { option := &tlsconfig.Options{ CAFile: "fixtures/https/ca.pem", CertFile: "fixtures/https/client-cert.pem", KeyFile: "fixtures/https/client-key.pem", } tlsConfig, err := tlsconfig.Client(*option) if err != nil { return nil, err } transport = &http.Transport{ TLSClientConfig: tlsConfig, } addr = defaultTLSHost scheme = "https" proto = "tcp" } else if d.UseDefaultHost { addr = defaultUnixSocket proto = "unix" scheme = "http" transport = &http.Transport{} } else { addr = d.sockPath() proto = "unix" scheme = "http" transport = &http.Transport{} } if err := sockets.ConfigureTransport(transport, proto, addr); err != nil { return nil, err } transport.DisableKeepAlives = true if proto == "unix" { addr = filepath.Base(addr) } return &clientConfig{ transport: transport, scheme: scheme, addr: addr, }, nil } func (d *Daemon) queryRootDir() (string, error) { // update daemon root by asking /info endpoint (to support user // namespaced daemon with root remapped uid.gid directory) clientConfig, err := d.getClientConfig() if err != nil { return "", err } c := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/info", nil) if err != nil { return "", err } req.Header.Set("Content-Type", "application/json") req.URL.Host = 
clientConfig.addr req.URL.Scheme = clientConfig.scheme resp, err := c.Do(req) if err != nil { return "", err } body := ioutils.NewReadCloserWrapper(resp.Body, func() error { return resp.Body.Close() }) type Info struct { DockerRootDir string } var b []byte var i Info b, err = request.ReadBody(body) if err == nil && resp.StatusCode == http.StatusOK { // read the docker root dir if err = json.Unmarshal(b, &i); err == nil { return i.DockerRootDir, nil } } return "", err } // Info returns the info struct for this daemon func (d *Daemon) Info(t testing.TB) types.Info { t.Helper() c := d.NewClientT(t) info, err := c.Info(context.Background()) assert.NilError(t, err) assert.NilError(t, c.Close()) return info } // cleanupRaftDir removes swarmkit wal files if present func cleanupRaftDir(t testing.TB, d *Daemon) { t.Helper() for _, p := range []string{"wal", "wal-v3-encrypted", "snap-v3-encrypted"} { dir := filepath.Join(d.Root, "swarm/raft", p) if err := os.RemoveAll(dir); err != nil { t.Logf("[%s] error removing %v: %v", d.id, dir, err) } } }
package daemon // import "github.com/docker/docker/testutil/daemon" import ( "context" "encoding/json" "fmt" "net/http" "os" "os/exec" "os/user" "path/filepath" "strconv" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/client" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/testutil/request" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" "gotest.tools/v3/assert" ) // LogT is the subset of the testing.TB interface used by the daemon. type LogT interface { Logf(string, ...interface{}) } // nopLog is a no-op implementation of LogT that is used in daemons created by // NewDaemon (where no testing.TB is available). type nopLog struct{} func (nopLog) Logf(string, ...interface{}) {} const ( defaultDockerdBinary = "dockerd" defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock" defaultDockerdRootlessBinary = "dockerd-rootless.sh" defaultUnixSocket = "/var/run/docker.sock" defaultTLSHost = "localhost:2376" ) var errDaemonNotStarted = errors.New("daemon not started") // SockRoot holds the path of the default docker integration daemon socket var SockRoot = filepath.Join(os.TempDir(), "docker-integration") type clientConfig struct { transport *http.Transport scheme string addr string } // Daemon represents a Docker daemon for the testing framework type Daemon struct { Root string Folder string Wait chan error UseDefaultHost bool UseDefaultTLSHost bool id string logFile *os.File cmd *exec.Cmd storageDriver string userlandProxy bool defaultCgroupNamespaceMode string execRoot string experimental bool init bool dockerdBinary string log LogT pidFile string args []string containerdSocket string rootlessUser *user.User rootlessXDGRuntimeDir string // swarm related field swarmListenAddr string SwarmPort int // FIXME(vdemeester) should probably not be exported DefaultAddrPool []string SubnetSize uint32 DataPathPort uint32 OOMScoreAdjust int // cached information CachedInfo types.Info } // NewDaemon returns a Daemon instance to be used for testing. // The daemon will not automatically start. // The daemon will modify and create files under workingDir. 
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) { storageDriver := os.Getenv("DOCKER_GRAPHDRIVER") if err := os.MkdirAll(SockRoot, 0700); err != nil { return nil, errors.Wrapf(err, "failed to create daemon socket root %q", SockRoot) } id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) dir := filepath.Join(workingDir, id) daemonFolder, err := filepath.Abs(dir) if err != nil { return nil, err } daemonRoot := filepath.Join(daemonFolder, "root") if err := os.MkdirAll(daemonRoot, 0755); err != nil { return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot) } userlandProxy := true if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err != nil { userlandProxy = val } } d := &Daemon{ id: id, Folder: daemonFolder, Root: daemonRoot, storageDriver: storageDriver, userlandProxy: userlandProxy, // dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation) execRoot: filepath.Join(os.TempDir(), "dxr", id), dockerdBinary: defaultDockerdBinary, swarmListenAddr: defaultSwarmListenAddr, SwarmPort: DefaultSwarmPort, log: nopLog{}, containerdSocket: defaultContainerdSocket, } for _, op := range ops { op(d) } if d.rootlessUser != nil { if err := os.Chmod(SockRoot, 0777); err != nil { return nil, err } uid, err := strconv.Atoi(d.rootlessUser.Uid) if err != nil { return nil, err } gid, err := strconv.Atoi(d.rootlessUser.Gid) if err != nil { return nil, err } if err := os.Chown(d.Folder, uid, gid); err != nil { return nil, err } if err := os.Chown(d.Root, uid, gid); err != nil { return nil, err } if err := os.MkdirAll(filepath.Dir(d.execRoot), 0700); err != nil { return nil, err } if err := os.Chown(filepath.Dir(d.execRoot), uid, gid); err != nil { return nil, err } if err := os.MkdirAll(d.execRoot, 0700); err != nil { return nil, err } if err := os.Chown(d.execRoot, uid, gid); err != nil { return nil, err } d.rootlessXDGRuntimeDir = filepath.Join(d.Folder, "xdgrun") if err := os.MkdirAll(d.rootlessXDGRuntimeDir, 0700); err != nil { return nil, err } if err := os.Chown(d.rootlessXDGRuntimeDir, uid, gid); err != nil { return nil, err } d.containerdSocket = "" } return d, nil } // New returns a Daemon instance to be used for testing. // This will create a directory such as d123456789 in the folder specified by // $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. // The daemon will not automatically start. func New(t testing.TB, ops ...Option) *Daemon { t.Helper() dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } dest = filepath.Join(dest, t.Name()) assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable") if os.Getenv("DOCKER_ROOTLESS") != "" { if os.Getenv("DOCKER_REMAP_ROOT") != "" { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_REMAP_ROOT currently") } if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err == nil && !val { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_USERLANDPROXY=false") } } ops = append(ops, WithRootlessUser("unprivilegeduser")) } ops = append(ops, WithOOMScoreAdjust(-500)) d, err := NewDaemon(dest, ops...) assert.NilError(t, err, "could not create daemon at %q", dest) if d.rootlessUser != nil && d.dockerdBinary != defaultDockerdBinary { t.Skipf("DOCKER_ROOTLESS doesn't support specifying non-default dockerd binary path %q", d.dockerdBinary) } return d } // BinaryPath returns the binary and its arguments. 
func (d *Daemon) BinaryPath() (string, error) { dockerdBinary, err := exec.LookPath(d.dockerdBinary) if err != nil { return "", errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) } return dockerdBinary, nil } // ContainersNamespace returns the containerd namespace used for containers. func (d *Daemon) ContainersNamespace() string { return d.id } // RootDir returns the root directory of the daemon. func (d *Daemon) RootDir() string { return d.Root } // ID returns the generated id of the daemon func (d *Daemon) ID() string { return d.id } // StorageDriver returns the configured storage driver of the daemon func (d *Daemon) StorageDriver() string { return d.storageDriver } // Sock returns the socket path of the daemon func (d *Daemon) Sock() string { return fmt.Sprintf("unix://" + d.sockPath()) } func (d *Daemon) sockPath() string { return filepath.Join(SockRoot, d.id+".sock") } // LogFileName returns the path the daemon's log file func (d *Daemon) LogFileName() string { return d.logFile.Name() } // ReadLogFile returns the content of the daemon log file func (d *Daemon) ReadLogFile() ([]byte, error) { _ = d.logFile.Sync() return os.ReadFile(d.logFile.Name()) } // NewClientT creates new client based on daemon's socket path func (d *Daemon) NewClientT(t testing.TB, extraOpts ...client.Opt) *client.Client { t.Helper() c, err := d.NewClient(extraOpts...) assert.NilError(t, err, "[%s] could not create daemon client", d.id) return c } // NewClient creates new client based on daemon's socket path func (d *Daemon) NewClient(extraOpts ...client.Opt) (*client.Client, error) { clientOpts := []client.Opt{ client.FromEnv, client.WithHost(d.Sock()), } clientOpts = append(clientOpts, extraOpts...) return client.NewClientWithOpts(clientOpts...) } // Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files func (d *Daemon) Cleanup(t testing.TB) { t.Helper() cleanupMount(t, d) cleanupRaftDir(t, d) cleanupNetworkNamespace(t, d) } // Start starts the daemon and return once it is ready to receive requests. func (d *Daemon) Start(t testing.TB, args ...string) { t.Helper() if err := d.StartWithError(args...); err != nil { d.DumpStackAndQuit() // in case the daemon is stuck t.Fatalf("[%s] failed to start daemon with arguments %v : %v", d.id, d.args, err) } } // StartWithError starts the daemon and return once it is ready to receive requests. // It returns an error in case it couldn't start. func (d *Daemon) StartWithError(args ...string) error { logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { return errors.Wrapf(err, "[%s] failed to create logfile", d.id) } return d.StartWithLogFile(logFile, args...) } // StartWithLogFile will start the daemon and attach its streams to a given file. 
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { d.handleUserns() dockerdBinary, err := d.BinaryPath() if err != nil { return err } if d.pidFile == "" { d.pidFile = filepath.Join(d.Folder, "docker.pid") } d.args = []string{} if d.rootlessUser != nil { if d.dockerdBinary != defaultDockerdBinary { return errors.Errorf("[%s] DOCKER_ROOTLESS doesn't support non-default dockerd binary path %q", d.id, d.dockerdBinary) } dockerdBinary = "sudo" d.args = append(d.args, "-u", d.rootlessUser.Username, "-E", "XDG_RUNTIME_DIR="+d.rootlessXDGRuntimeDir, "-E", "HOME="+d.rootlessUser.HomeDir, "-E", "PATH="+os.Getenv("PATH"), "--", defaultDockerdRootlessBinary, ) } d.args = append(d.args, "--data-root", d.Root, "--exec-root", d.execRoot, "--pidfile", d.pidFile, fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), "--containerd-namespace", d.id, "--containerd-plugins-namespace", d.id+"p", ) if d.containerdSocket != "" { d.args = append(d.args, "--containerd", d.containerdSocket) } if d.defaultCgroupNamespaceMode != "" { d.args = append(d.args, "--default-cgroupns-mode", d.defaultCgroupNamespaceMode) } if d.experimental { d.args = append(d.args, "--experimental") } if d.init { d.args = append(d.args, "--init") } if !(d.UseDefaultHost || d.UseDefaultTLSHost) { d.args = append(d.args, "--host", d.Sock()) } if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.args = append(d.args, "--userns-remap", root) } // If we don't explicitly set the log-level or debug flag(-D) then // turn on debug mode foundLog := false foundSd := false for _, a := range providedArgs { if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { foundLog = true } if strings.Contains(a, "--storage-driver") { foundSd = true } } if !foundLog { d.args = append(d.args, "--debug") } if d.storageDriver != "" && !foundSd { d.args = append(d.args, "--storage-driver", d.storageDriver) } d.args = append(d.args, providedArgs...) d.cmd = exec.Command(dockerdBinary, d.args...) d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") d.cmd.Stdout = out d.cmd.Stderr = out d.logFile = out if d.rootlessUser != nil { // sudo requires this for propagating signals setsid(d.cmd) } if err := d.cmd.Start(); err != nil { return errors.Wrapf(err, "[%s] could not start daemon container", d.id) } wait := make(chan error, 1) go func() { ret := d.cmd.Wait() d.log.Logf("[%s] exiting daemon", d.id) // If we send before logging, we might accidentally log _after_ the test is done. // As of Go 1.12, this incurs a panic instead of silently being dropped. 
wait <- ret close(wait) }() d.Wait = wait clientConfig, err := d.getClientConfig() if err != nil { return err } client := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/_ping", nil) if err != nil { return errors.Wrapf(err, "[%s] could not create new request", d.id) } req.URL.Host = clientConfig.addr req.URL.Scheme = clientConfig.scheme ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() // make sure daemon is ready to receive requests for i := 0; ; i++ { d.log.Logf("[%s] waiting for daemon to start", d.id) select { case <-ctx.Done(): return errors.Wrapf(ctx.Err(), "[%s] daemon exited and never started", d.id) case err := <-d.Wait: return errors.Wrapf(err, "[%s] daemon exited during startup", d.id) default: rctx, rcancel := context.WithTimeout(context.TODO(), 2*time.Second) defer rcancel() resp, err := client.Do(req.WithContext(rctx)) if err != nil { if i > 2 { // don't log the first couple, this ends up just being noise d.log.Logf("[%s] error pinging daemon on start: %v", d.id, err) } select { case <-ctx.Done(): case <-time.After(500 * time.Millisecond): } continue } resp.Body.Close() if resp.StatusCode != http.StatusOK { d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) } d.log.Logf("[%s] daemon started\n", d.id) d.Root, err = d.queryRootDir() if err != nil { return errors.Wrapf(err, "[%s] error querying daemon for root directory", d.id) } return nil } } } // StartWithBusybox will first start the daemon with Daemon.Start() // then save the busybox image from the main daemon and load it into this Daemon instance. func (d *Daemon) StartWithBusybox(t testing.TB, arg ...string) { t.Helper() d.Start(t, arg...) d.LoadBusybox(t) } // Kill will send a SIGKILL to the daemon func (d *Daemon) Kill() error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { d.logFile.Close() d.cmd = nil }() if err := d.cmd.Process.Kill(); err != nil { return err } if d.pidFile != "" { _ = os.Remove(d.pidFile) } return nil } // Pid returns the pid of the daemon func (d *Daemon) Pid() int { return d.cmd.Process.Pid } // Interrupt stops the daemon by sending it an Interrupt signal func (d *Daemon) Interrupt() error { return d.Signal(os.Interrupt) } // Signal sends the specified signal to the daemon if running func (d *Daemon) Signal(signal os.Signal) error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } return d.cmd.Process.Signal(signal) } // DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its // stack to its log file and exit // This is used primarily for gathering debug information on test timeout func (d *Daemon) DumpStackAndQuit() { if d.cmd == nil || d.cmd.Process == nil { return } SignalDaemonDump(d.cmd.Process.Pid) } // Stop will send a SIGINT every second and wait for the daemon to stop. // If it times out, a SIGKILL is sent. // Stop will not delete the daemon directory. If a purged daemon is needed, // instantiate a new one with NewDaemon. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Stop(t testing.TB) { t.Helper() err := d.StopWithError() if err != nil { if err != errDaemonNotStarted { t.Fatalf("[%s] error while stopping the daemon: %v", d.id, err) } else { t.Logf("[%s] daemon is not started", d.id) } } } // StopWithError will send a SIGINT every second and wait for the daemon to stop. // If it timeouts, a SIGKILL is sent. // Stop will not delete the daemon directory. 
If a purged daemon is needed, // instantiate a new one with NewDaemon. func (d *Daemon) StopWithError() (err error) { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { if err != nil { d.log.Logf("[%s] error while stopping daemon: %v", d.id, err) } else { d.log.Logf("[%s] daemon stopped", d.id) if d.pidFile != "" { _ = os.Remove(d.pidFile) } } if err := d.logFile.Close(); err != nil { d.log.Logf("[%s] failed to close daemon logfile: %v", d.id, err) } d.cmd = nil }() i := 1 ticker := time.NewTicker(time.Second) defer ticker.Stop() tick := ticker.C d.log.Logf("[%s] stopping daemon", d.id) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { if strings.Contains(err.Error(), "os: process already finished") { return errDaemonNotStarted } return errors.Wrapf(err, "[%s] could not send signal", d.id) } out1: for { select { case err := <-d.Wait: return err case <-time.After(20 * time.Second): // time for stopping jobs and run onShutdown hooks d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id) break out1 } } out2: for { select { case err := <-d.Wait: return err case <-tick: i++ if i > 5 { d.log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i) break out2 } d.log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i) } } } if err := d.cmd.Process.Kill(); err != nil { d.log.Logf("[%s] failed to kill daemon: %v", d.id, err) return err } return nil } // Restart will restart the daemon by first stopping it and the starting it. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Restart(t testing.TB, args ...string) { t.Helper() d.Stop(t) d.Start(t, args...) } // RestartWithError will restart the daemon by first stopping it and then starting it. func (d *Daemon) RestartWithError(arg ...string) error { if err := d.StopWithError(); err != nil { return err } return d.StartWithError(arg...) 
} func (d *Daemon) handleUserns() { // in the case of tests running a user namespace-enabled daemon, we have resolved // d.Root to be the actual final path of the graph dir after the "uid.gid" of // remapped root is added--we need to subtract it from the path before calling // start or else we will continue making subdirectories rather than truly restarting // with the same location/root: if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.Root = filepath.Dir(d.Root) } } // ReloadConfig asks the daemon to reload its configuration func (d *Daemon) ReloadConfig() error { if d.cmd == nil || d.cmd.Process == nil { return errors.New("daemon is not running") } errCh := make(chan error, 1) started := make(chan struct{}) go func() { _, body, err := request.Get("/events", request.Host(d.Sock())) close(started) if err != nil { errCh <- err return } defer body.Close() dec := json.NewDecoder(body) for { var e events.Message if err := dec.Decode(&e); err != nil { errCh <- err return } if e.Type != events.DaemonEventType { continue } if e.Action != "reload" { continue } close(errCh) // notify that we are done return } }() <-started if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { return errors.Wrapf(err, "[%s] error signaling daemon reload", d.id) } select { case err := <-errCh: if err != nil { return errors.Wrapf(err, "[%s] error waiting for daemon reload event", d.id) } case <-time.After(30 * time.Second): return errors.Errorf("[%s] daemon reload event timed out after 30 seconds", d.id) } return nil } // LoadBusybox image into the daemon func (d *Daemon) LoadBusybox(t testing.TB) { t.Helper() clientHost, err := client.NewClientWithOpts(client.FromEnv) assert.NilError(t, err, "[%s] failed to create client", d.id) defer clientHost.Close() ctx := context.Background() reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) assert.NilError(t, err, "[%s] failed to download busybox", d.id) defer reader.Close() c := d.NewClientT(t) defer c.Close() resp, err := c.ImageLoad(ctx, reader, true) assert.NilError(t, err, "[%s] failed to load busybox", d.id) defer resp.Body.Close() } func (d *Daemon) getClientConfig() (*clientConfig, error) { var ( transport *http.Transport scheme string addr string proto string ) if d.UseDefaultTLSHost { option := &tlsconfig.Options{ CAFile: "fixtures/https/ca.pem", CertFile: "fixtures/https/client-cert.pem", KeyFile: "fixtures/https/client-key.pem", } tlsConfig, err := tlsconfig.Client(*option) if err != nil { return nil, err } transport = &http.Transport{ TLSClientConfig: tlsConfig, } addr = defaultTLSHost scheme = "https" proto = "tcp" } else if d.UseDefaultHost { addr = defaultUnixSocket proto = "unix" scheme = "http" transport = &http.Transport{} } else { addr = d.sockPath() proto = "unix" scheme = "http" transport = &http.Transport{} } if err := sockets.ConfigureTransport(transport, proto, addr); err != nil { return nil, err } transport.DisableKeepAlives = true if proto == "unix" { addr = filepath.Base(addr) } return &clientConfig{ transport: transport, scheme: scheme, addr: addr, }, nil } func (d *Daemon) queryRootDir() (string, error) { // update daemon root by asking /info endpoint (to support user // namespaced daemon with root remapped uid.gid directory) clientConfig, err := d.getClientConfig() if err != nil { return "", err } c := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/info", nil) if err != nil { return "", err } req.Header.Set("Content-Type", "application/json") req.URL.Host = 
clientConfig.addr req.URL.Scheme = clientConfig.scheme resp, err := c.Do(req) if err != nil { return "", err } body := ioutils.NewReadCloserWrapper(resp.Body, func() error { return resp.Body.Close() }) type Info struct { DockerRootDir string } var b []byte var i Info b, err = request.ReadBody(body) if err == nil && resp.StatusCode == http.StatusOK { // read the docker root dir if err = json.Unmarshal(b, &i); err == nil { return i.DockerRootDir, nil } } return "", err } // Info returns the info struct for this daemon func (d *Daemon) Info(t testing.TB) types.Info { t.Helper() c := d.NewClientT(t) info, err := c.Info(context.Background()) assert.NilError(t, err) assert.NilError(t, c.Close()) return info } // cleanupRaftDir removes swarmkit wal files if present func cleanupRaftDir(t testing.TB, d *Daemon) { t.Helper() for _, p := range []string{"wal", "wal-v3-encrypted", "snap-v3-encrypted"} { dir := filepath.Join(d.Root, "swarm/raft", p) if err := os.RemoveAll(dir); err != nil { t.Logf("[%s] error removing %v: %v", d.id, dir, err) } } }
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
Looks "better", but still flaky So, looks like test-integration-flaky (perhaps because of the subtests) runs the test 25 times in total: ```bash cd /Users/sebastiaan/Downloads/bundles/1/test-integration-flaky/TestDaemonProxy/reload_sanitized ls -l drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d03a7aefa903c/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d12825289bf2b/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d35d0bbf7367a/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d47371778e153/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d4c7283bf1c32/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d59a9befb682b/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d5c38961c3cca/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d5c6cf3878a96/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d5f09c9777209/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d6e6a3dc8339f/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d7ef34388452f/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d7fc5ed00fc65/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d83c195f1c19d/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d9ac12b0678f4/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 d9da71ee7bf96/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 db95143c8d488/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 dba5677b1543e/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 dbe1bc8fb4409/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 dc09093c6ca96/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 dc3a50433e7c6/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 dc7d0850ec28a/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 dd0482bb66e57/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 dd4312712c345/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 ddc2b2078d2bb/ drwxr-xr-x@ 3 sebastiaan staff 96 Sep 21 16:06 def3e032e5255/ ``` Checking if logs for all of those contains the `Reloaded configuration` string, that looks to be the case: ```bash grep -lr 'Reloaded configuration' . | wc -l 25 ``` But the test failed twice; ```bash grep -r 'does not contain "Reloaded configuration:"' . | wc -l 2 ```
thaJeztah
4470
moby/moby
42835
Add http(s) proxy properties to daemon configuration (carry 42647)
### :warning: note that the format in `daemon.json` changed in a follow-up; https://github.com/moby/moby/pull/43448

The new format uses a `"proxies"` key that holds the proxies;

```json
{
  "proxies": {
    "http-proxy": "http-config",
    "https-proxy": "https-config",
    "no-proxy": "no-proxy-config"
  }
}
```

--------

carry of https://github.com/moby/moby/pull/42647
fixes https://github.com/moby/moby/issues/24758
closes https://github.com/moby/moby/pull/42647
addresses https://github.com/moby/moby/issues/40201

This allows configuring the daemon's proxy server through the daemon.json configuration file or command-line flags, in addition to the existing option (through environment variables).

Configuring environment variables on Windows to configure a service is more complicated than on Linux, and adding alternatives for this to the daemon configuration makes the configuration more transparent and easier to use.

The configuration as set through command-line flags or through the daemon.json configuration file takes precedence over env-vars in the daemon's environment, which allows the daemon to use a different proxy. If both command-line flags and a daemon.json configuration option are set, an error is produced when starting the daemon.

Note that this configuration is not "live reloadable" due to Golang's use of `sync.Once()` for proxy configuration, which means that changing the proxy configuration requires a restart of the daemon (reload / SIGHUP will not update the configuration).

With this patch: (⚠️ https://github.com/moby/moby/pull/43448 changes the location of these fields to be in a "proxy-config" struct within `daemon.json`)

```console
cat /etc/docker/daemon.json
{
  "http-proxy": "http://proxytest.example.com:80",
  "https-proxy": "https://proxytest.example.com:443"
}

docker pull busybox
Using default tag: latest
Error response from daemon: Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host

docker build .
Sending build context to Docker daemon 89.28MB
Step 1/3 : FROM golang:1.16-alpine AS base
Get "https://registry-1.docker.io/v2/": proxyconnect tcp: dial tcp: lookup proxytest.example.com on 127.0.0.11:53: no such host
```

Integration tests were added to test the behavior:

- verify that the configuration through all means (env-var, command-line flags, daemon.json) is used, and used in the expected order of preference.
- verify that conflicting options produce an error.
- verify that logs and error messages sanitise proxy URLs (as they may contain username / password)

```bash
make BIND_DIR=.
DOCKER_GRAPHDRIVER=vfs TEST_FILTER=TestDaemonProxy binary test-integration Running integration-test (iteration 1) Running /go/src/github.com/docker/docker/integration/daemon (amd64.integration.daemon) flags=-test.v -test.timeout=5m -test.run TestDaemonProxy === RUN TestDaemonProxy === RUN TestDaemonProxy/environment_variables === RUN TestDaemonProxy/command-line_options === RUN TestDaemonProxy/configuration_file === RUN TestDaemonProxy/conflicting_options === RUN TestDaemonProxy/reload_sanitized --- PASS: TestDaemonProxy (6.75s) --- PASS: TestDaemonProxy/environment_variables (1.84s) --- PASS: TestDaemonProxy/command-line_options (1.84s) --- PASS: TestDaemonProxy/configuration_file (1.93s) --- PASS: TestDaemonProxy/conflicting_options (0.52s) --- PASS: TestDaemonProxy/reload_sanitized (0.63s) PASS DONE 6 tests in 6.942s ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown - Add options to the `daemon.json` configuration file and `dockerd` command-line to configure the daemon's proxy. With these options it is possible to configure http(s) proxies for the daemon through the configuration file as an alternative to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, or to override the system-wide proxy configuration set in those environment variables. ``` **- A picture of a cute animal (not mandatory but encouraged)**
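The description above spells out a clear resolution order: a proxy set via a command-line flag or daemon.json overrides the environment variables, and setting both the flag and the daemon.json option is a startup error. The sketch below illustrates that rule only; `resolveProxy` and its arguments are invented for this example and are not the daemon's actual implementation or flag names.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// resolveProxy illustrates the precedence described in the PR text:
// flag/daemon.json beat the environment, and flag + daemon.json together
// is rejected at startup.
func resolveProxy(flagVal, fileVal, envKey string) (string, error) {
	if flagVal != "" && fileVal != "" {
		return "", errors.New("proxy set via both command-line flag and daemon.json")
	}
	if flagVal != "" {
		return flagVal, nil
	}
	if fileVal != "" {
		return fileVal, nil
	}
	// fall back to HTTP_PROXY / HTTPS_PROXY / NO_PROXY from the environment
	return os.Getenv(envKey), nil
}

func main() {
	os.Setenv("HTTP_PROXY", "http://env.example.com:80")

	// daemon.json wins over the environment variable.
	v, _ := resolveProxy("", "http://proxytest.example.com:80", "HTTP_PROXY")
	fmt.Println(v) // http://proxytest.example.com:80

	// flag + daemon.json together is a configuration error.
	_, err := resolveProxy("http://flag.example.com:80", "http://proxytest.example.com:80", "HTTP_PROXY")
	fmt.Println(err)
}
```

The "not live reloadable" caveat in the description follows from Go's net/http proxy support reading the environment only once (guarded by `sync.Once`), so the effective proxy configuration is fixed for the lifetime of the daemon process.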
null
2021-09-09 12:06:21+00:00
2021-10-28 18:14:54+00:00
testutil/daemon/daemon.go
package daemon // import "github.com/docker/docker/testutil/daemon" import ( "context" "encoding/json" "fmt" "net/http" "os" "os/exec" "os/user" "path/filepath" "strconv" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/client" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/testutil/request" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" "gotest.tools/v3/assert" ) // LogT is the subset of the testing.TB interface used by the daemon. type LogT interface { Logf(string, ...interface{}) } // nopLog is a no-op implementation of LogT that is used in daemons created by // NewDaemon (where no testing.TB is available). type nopLog struct{} func (nopLog) Logf(string, ...interface{}) {} const ( defaultDockerdBinary = "dockerd" defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock" defaultDockerdRootlessBinary = "dockerd-rootless.sh" defaultUnixSocket = "/var/run/docker.sock" defaultTLSHost = "localhost:2376" ) var errDaemonNotStarted = errors.New("daemon not started") // SockRoot holds the path of the default docker integration daemon socket var SockRoot = filepath.Join(os.TempDir(), "docker-integration") type clientConfig struct { transport *http.Transport scheme string addr string } // Daemon represents a Docker daemon for the testing framework type Daemon struct { Root string Folder string Wait chan error UseDefaultHost bool UseDefaultTLSHost bool id string logFile *os.File cmd *exec.Cmd storageDriver string userlandProxy bool defaultCgroupNamespaceMode string execRoot string experimental bool init bool dockerdBinary string log LogT pidFile string args []string containerdSocket string rootlessUser *user.User rootlessXDGRuntimeDir string // swarm related field swarmListenAddr string SwarmPort int // FIXME(vdemeester) should probably not be exported DefaultAddrPool []string SubnetSize uint32 DataPathPort uint32 OOMScoreAdjust int // cached information CachedInfo types.Info } // NewDaemon returns a Daemon instance to be used for testing. // The daemon will not automatically start. // The daemon will modify and create files under workingDir. 
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) { storageDriver := os.Getenv("DOCKER_GRAPHDRIVER") if err := os.MkdirAll(SockRoot, 0700); err != nil { return nil, errors.Wrapf(err, "failed to create daemon socket root %q", SockRoot) } id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) dir := filepath.Join(workingDir, id) daemonFolder, err := filepath.Abs(dir) if err != nil { return nil, err } daemonRoot := filepath.Join(daemonFolder, "root") if err := os.MkdirAll(daemonRoot, 0755); err != nil { return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot) } userlandProxy := true if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err != nil { userlandProxy = val } } d := &Daemon{ id: id, Folder: daemonFolder, Root: daemonRoot, storageDriver: storageDriver, userlandProxy: userlandProxy, // dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation) execRoot: filepath.Join(os.TempDir(), "dxr", id), dockerdBinary: defaultDockerdBinary, swarmListenAddr: defaultSwarmListenAddr, SwarmPort: DefaultSwarmPort, log: nopLog{}, containerdSocket: defaultContainerdSocket, } for _, op := range ops { op(d) } if d.rootlessUser != nil { if err := os.Chmod(SockRoot, 0777); err != nil { return nil, err } uid, err := strconv.Atoi(d.rootlessUser.Uid) if err != nil { return nil, err } gid, err := strconv.Atoi(d.rootlessUser.Gid) if err != nil { return nil, err } if err := os.Chown(d.Folder, uid, gid); err != nil { return nil, err } if err := os.Chown(d.Root, uid, gid); err != nil { return nil, err } if err := os.MkdirAll(filepath.Dir(d.execRoot), 0700); err != nil { return nil, err } if err := os.Chown(filepath.Dir(d.execRoot), uid, gid); err != nil { return nil, err } if err := os.MkdirAll(d.execRoot, 0700); err != nil { return nil, err } if err := os.Chown(d.execRoot, uid, gid); err != nil { return nil, err } d.rootlessXDGRuntimeDir = filepath.Join(d.Folder, "xdgrun") if err := os.MkdirAll(d.rootlessXDGRuntimeDir, 0700); err != nil { return nil, err } if err := os.Chown(d.rootlessXDGRuntimeDir, uid, gid); err != nil { return nil, err } d.containerdSocket = "" } return d, nil } // New returns a Daemon instance to be used for testing. // This will create a directory such as d123456789 in the folder specified by // $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. // The daemon will not automatically start. func New(t testing.TB, ops ...Option) *Daemon { t.Helper() dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } dest = filepath.Join(dest, t.Name()) assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable") if os.Getenv("DOCKER_ROOTLESS") != "" { if os.Getenv("DOCKER_REMAP_ROOT") != "" { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_REMAP_ROOT currently") } if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err == nil && !val { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_USERLANDPROXY=false") } } ops = append(ops, WithRootlessUser("unprivilegeduser")) } ops = append(ops, WithOOMScoreAdjust(-500)) d, err := NewDaemon(dest, ops...) assert.NilError(t, err, "could not create daemon at %q", dest) if d.rootlessUser != nil && d.dockerdBinary != defaultDockerdBinary { t.Skipf("DOCKER_ROOTLESS doesn't support specifying non-default dockerd binary path %q", d.dockerdBinary) } return d } // BinaryPath returns the binary and its arguments. 
func (d *Daemon) BinaryPath() (string, error) { dockerdBinary, err := exec.LookPath(d.dockerdBinary) if err != nil { return "", errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) } return dockerdBinary, nil } // ContainersNamespace returns the containerd namespace used for containers. func (d *Daemon) ContainersNamespace() string { return d.id } // RootDir returns the root directory of the daemon. func (d *Daemon) RootDir() string { return d.Root } // ID returns the generated id of the daemon func (d *Daemon) ID() string { return d.id } // StorageDriver returns the configured storage driver of the daemon func (d *Daemon) StorageDriver() string { return d.storageDriver } // Sock returns the socket path of the daemon func (d *Daemon) Sock() string { return fmt.Sprintf("unix://" + d.sockPath()) } func (d *Daemon) sockPath() string { return filepath.Join(SockRoot, d.id+".sock") } // LogFileName returns the path the daemon's log file func (d *Daemon) LogFileName() string { return d.logFile.Name() } // ReadLogFile returns the content of the daemon log file func (d *Daemon) ReadLogFile() ([]byte, error) { return os.ReadFile(d.logFile.Name()) } // NewClientT creates new client based on daemon's socket path func (d *Daemon) NewClientT(t testing.TB, extraOpts ...client.Opt) *client.Client { t.Helper() c, err := d.NewClient(extraOpts...) assert.NilError(t, err, "[%s] could not create daemon client", d.id) return c } // NewClient creates new client based on daemon's socket path func (d *Daemon) NewClient(extraOpts ...client.Opt) (*client.Client, error) { clientOpts := []client.Opt{ client.FromEnv, client.WithHost(d.Sock()), } clientOpts = append(clientOpts, extraOpts...) return client.NewClientWithOpts(clientOpts...) } // Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files func (d *Daemon) Cleanup(t testing.TB) { t.Helper() cleanupMount(t, d) cleanupRaftDir(t, d) cleanupNetworkNamespace(t, d) } // Start starts the daemon and return once it is ready to receive requests. func (d *Daemon) Start(t testing.TB, args ...string) { t.Helper() if err := d.StartWithError(args...); err != nil { d.DumpStackAndQuit() // in case the daemon is stuck t.Fatalf("[%s] failed to start daemon with arguments %v : %v", d.id, d.args, err) } } // StartWithError starts the daemon and return once it is ready to receive requests. // It returns an error in case it couldn't start. func (d *Daemon) StartWithError(args ...string) error { logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { return errors.Wrapf(err, "[%s] failed to create logfile", d.id) } return d.StartWithLogFile(logFile, args...) } // StartWithLogFile will start the daemon and attach its streams to a given file. 
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { d.handleUserns() dockerdBinary, err := d.BinaryPath() if err != nil { return err } if d.pidFile == "" { d.pidFile = filepath.Join(d.Folder, "docker.pid") } d.args = []string{} if d.rootlessUser != nil { if d.dockerdBinary != defaultDockerdBinary { return errors.Errorf("[%s] DOCKER_ROOTLESS doesn't support non-default dockerd binary path %q", d.id, d.dockerdBinary) } dockerdBinary = "sudo" d.args = append(d.args, "-u", d.rootlessUser.Username, "-E", "XDG_RUNTIME_DIR="+d.rootlessXDGRuntimeDir, "-E", "HOME="+d.rootlessUser.HomeDir, "-E", "PATH="+os.Getenv("PATH"), "--", defaultDockerdRootlessBinary, ) } d.args = append(d.args, "--data-root", d.Root, "--exec-root", d.execRoot, "--pidfile", d.pidFile, fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), "--containerd-namespace", d.id, "--containerd-plugins-namespace", d.id+"p", ) if d.containerdSocket != "" { d.args = append(d.args, "--containerd", d.containerdSocket) } if d.defaultCgroupNamespaceMode != "" { d.args = append(d.args, "--default-cgroupns-mode", d.defaultCgroupNamespaceMode) } if d.experimental { d.args = append(d.args, "--experimental") } if d.init { d.args = append(d.args, "--init") } if !(d.UseDefaultHost || d.UseDefaultTLSHost) { d.args = append(d.args, "--host", d.Sock()) } if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.args = append(d.args, "--userns-remap", root) } // If we don't explicitly set the log-level or debug flag(-D) then // turn on debug mode foundLog := false foundSd := false for _, a := range providedArgs { if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { foundLog = true } if strings.Contains(a, "--storage-driver") { foundSd = true } } if !foundLog { d.args = append(d.args, "--debug") } if d.storageDriver != "" && !foundSd { d.args = append(d.args, "--storage-driver", d.storageDriver) } d.args = append(d.args, providedArgs...) d.cmd = exec.Command(dockerdBinary, d.args...) d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") d.cmd.Stdout = out d.cmd.Stderr = out d.logFile = out if d.rootlessUser != nil { // sudo requires this for propagating signals setsid(d.cmd) } if err := d.cmd.Start(); err != nil { return errors.Wrapf(err, "[%s] could not start daemon container", d.id) } wait := make(chan error, 1) go func() { ret := d.cmd.Wait() d.log.Logf("[%s] exiting daemon", d.id) // If we send before logging, we might accidentally log _after_ the test is done. // As of Go 1.12, this incurs a panic instead of silently being dropped. 
wait <- ret close(wait) }() d.Wait = wait clientConfig, err := d.getClientConfig() if err != nil { return err } client := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/_ping", nil) if err != nil { return errors.Wrapf(err, "[%s] could not create new request", d.id) } req.URL.Host = clientConfig.addr req.URL.Scheme = clientConfig.scheme ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() // make sure daemon is ready to receive requests for i := 0; ; i++ { d.log.Logf("[%s] waiting for daemon to start", d.id) select { case <-ctx.Done(): return errors.Wrapf(ctx.Err(), "[%s] daemon exited and never started", d.id) case err := <-d.Wait: return errors.Wrapf(err, "[%s] daemon exited during startup", d.id) default: rctx, rcancel := context.WithTimeout(context.TODO(), 2*time.Second) defer rcancel() resp, err := client.Do(req.WithContext(rctx)) if err != nil { if i > 2 { // don't log the first couple, this ends up just being noise d.log.Logf("[%s] error pinging daemon on start: %v", d.id, err) } select { case <-ctx.Done(): case <-time.After(500 * time.Millisecond): } continue } resp.Body.Close() if resp.StatusCode != http.StatusOK { d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) } d.log.Logf("[%s] daemon started\n", d.id) d.Root, err = d.queryRootDir() if err != nil { return errors.Wrapf(err, "[%s] error querying daemon for root directory", d.id) } return nil } } } // StartWithBusybox will first start the daemon with Daemon.Start() // then save the busybox image from the main daemon and load it into this Daemon instance. func (d *Daemon) StartWithBusybox(t testing.TB, arg ...string) { t.Helper() d.Start(t, arg...) d.LoadBusybox(t) } // Kill will send a SIGKILL to the daemon func (d *Daemon) Kill() error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { d.logFile.Close() d.cmd = nil }() if err := d.cmd.Process.Kill(); err != nil { return err } if d.pidFile != "" { _ = os.Remove(d.pidFile) } return nil } // Pid returns the pid of the daemon func (d *Daemon) Pid() int { return d.cmd.Process.Pid } // Interrupt stops the daemon by sending it an Interrupt signal func (d *Daemon) Interrupt() error { return d.Signal(os.Interrupt) } // Signal sends the specified signal to the daemon if running func (d *Daemon) Signal(signal os.Signal) error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } return d.cmd.Process.Signal(signal) } // DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its // stack to its log file and exit // This is used primarily for gathering debug information on test timeout func (d *Daemon) DumpStackAndQuit() { if d.cmd == nil || d.cmd.Process == nil { return } SignalDaemonDump(d.cmd.Process.Pid) } // Stop will send a SIGINT every second and wait for the daemon to stop. // If it times out, a SIGKILL is sent. // Stop will not delete the daemon directory. If a purged daemon is needed, // instantiate a new one with NewDaemon. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Stop(t testing.TB) { t.Helper() err := d.StopWithError() if err != nil { if err != errDaemonNotStarted { t.Fatalf("[%s] error while stopping the daemon: %v", d.id, err) } else { t.Logf("[%s] daemon is not started", d.id) } } } // StopWithError will send a SIGINT every second and wait for the daemon to stop. // If it timeouts, a SIGKILL is sent. // Stop will not delete the daemon directory. 
If a purged daemon is needed, // instantiate a new one with NewDaemon. func (d *Daemon) StopWithError() (err error) { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { if err != nil { d.log.Logf("[%s] error while stopping daemon: %v", d.id, err) } else { d.log.Logf("[%s] daemon stopped", d.id) if d.pidFile != "" { _ = os.Remove(d.pidFile) } } if err := d.logFile.Close(); err != nil { d.log.Logf("[%s] failed to close daemon logfile: %v", d.id, err) } d.cmd = nil }() i := 1 ticker := time.NewTicker(time.Second) defer ticker.Stop() tick := ticker.C d.log.Logf("[%s] stopping daemon", d.id) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { if strings.Contains(err.Error(), "os: process already finished") { return errDaemonNotStarted } return errors.Wrapf(err, "[%s] could not send signal", d.id) } out1: for { select { case err := <-d.Wait: return err case <-time.After(20 * time.Second): // time for stopping jobs and run onShutdown hooks d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id) break out1 } } out2: for { select { case err := <-d.Wait: return err case <-tick: i++ if i > 5 { d.log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i) break out2 } d.log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i) } } } if err := d.cmd.Process.Kill(); err != nil { d.log.Logf("[%s] failed to kill daemon: %v", d.id, err) return err } return nil } // Restart will restart the daemon by first stopping it and the starting it. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Restart(t testing.TB, args ...string) { t.Helper() d.Stop(t) d.Start(t, args...) } // RestartWithError will restart the daemon by first stopping it and then starting it. func (d *Daemon) RestartWithError(arg ...string) error { if err := d.StopWithError(); err != nil { return err } return d.StartWithError(arg...) 
} func (d *Daemon) handleUserns() { // in the case of tests running a user namespace-enabled daemon, we have resolved // d.Root to be the actual final path of the graph dir after the "uid.gid" of // remapped root is added--we need to subtract it from the path before calling // start or else we will continue making subdirectories rather than truly restarting // with the same location/root: if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.Root = filepath.Dir(d.Root) } } // ReloadConfig asks the daemon to reload its configuration func (d *Daemon) ReloadConfig() error { if d.cmd == nil || d.cmd.Process == nil { return errors.New("daemon is not running") } errCh := make(chan error, 1) started := make(chan struct{}) go func() { _, body, err := request.Get("/events", request.Host(d.Sock())) close(started) if err != nil { errCh <- err return } defer body.Close() dec := json.NewDecoder(body) for { var e events.Message if err := dec.Decode(&e); err != nil { errCh <- err return } if e.Type != events.DaemonEventType { continue } if e.Action != "reload" { continue } close(errCh) // notify that we are done return } }() <-started if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { return errors.Wrapf(err, "[%s] error signaling daemon reload", d.id) } select { case err := <-errCh: if err != nil { return errors.Wrapf(err, "[%s] error waiting for daemon reload event", d.id) } case <-time.After(30 * time.Second): return errors.Errorf("[%s] daemon reload event timed out after 30 seconds", d.id) } return nil } // LoadBusybox image into the daemon func (d *Daemon) LoadBusybox(t testing.TB) { t.Helper() clientHost, err := client.NewClientWithOpts(client.FromEnv) assert.NilError(t, err, "[%s] failed to create client", d.id) defer clientHost.Close() ctx := context.Background() reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) assert.NilError(t, err, "[%s] failed to download busybox", d.id) defer reader.Close() c := d.NewClientT(t) defer c.Close() resp, err := c.ImageLoad(ctx, reader, true) assert.NilError(t, err, "[%s] failed to load busybox", d.id) defer resp.Body.Close() } func (d *Daemon) getClientConfig() (*clientConfig, error) { var ( transport *http.Transport scheme string addr string proto string ) if d.UseDefaultTLSHost { option := &tlsconfig.Options{ CAFile: "fixtures/https/ca.pem", CertFile: "fixtures/https/client-cert.pem", KeyFile: "fixtures/https/client-key.pem", } tlsConfig, err := tlsconfig.Client(*option) if err != nil { return nil, err } transport = &http.Transport{ TLSClientConfig: tlsConfig, } addr = defaultTLSHost scheme = "https" proto = "tcp" } else if d.UseDefaultHost { addr = defaultUnixSocket proto = "unix" scheme = "http" transport = &http.Transport{} } else { addr = d.sockPath() proto = "unix" scheme = "http" transport = &http.Transport{} } if err := sockets.ConfigureTransport(transport, proto, addr); err != nil { return nil, err } transport.DisableKeepAlives = true if proto == "unix" { addr = filepath.Base(addr) } return &clientConfig{ transport: transport, scheme: scheme, addr: addr, }, nil } func (d *Daemon) queryRootDir() (string, error) { // update daemon root by asking /info endpoint (to support user // namespaced daemon with root remapped uid.gid directory) clientConfig, err := d.getClientConfig() if err != nil { return "", err } c := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/info", nil) if err != nil { return "", err } req.Header.Set("Content-Type", "application/json") req.URL.Host = 
clientConfig.addr req.URL.Scheme = clientConfig.scheme resp, err := c.Do(req) if err != nil { return "", err } body := ioutils.NewReadCloserWrapper(resp.Body, func() error { return resp.Body.Close() }) type Info struct { DockerRootDir string } var b []byte var i Info b, err = request.ReadBody(body) if err == nil && resp.StatusCode == http.StatusOK { // read the docker root dir if err = json.Unmarshal(b, &i); err == nil { return i.DockerRootDir, nil } } return "", err } // Info returns the info struct for this daemon func (d *Daemon) Info(t testing.TB) types.Info { t.Helper() c := d.NewClientT(t) info, err := c.Info(context.Background()) assert.NilError(t, err) assert.NilError(t, c.Close()) return info } // cleanupRaftDir removes swarmkit wal files if present func cleanupRaftDir(t testing.TB, d *Daemon) { t.Helper() for _, p := range []string{"wal", "wal-v3-encrypted", "snap-v3-encrypted"} { dir := filepath.Join(d.Root, "swarm/raft", p) if err := os.RemoveAll(dir); err != nil { t.Logf("[%s] error removing %v: %v", d.id, dir, err) } } }
package daemon // import "github.com/docker/docker/testutil/daemon" import ( "context" "encoding/json" "fmt" "net/http" "os" "os/exec" "os/user" "path/filepath" "strconv" "strings" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/client" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/testutil/request" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" "gotest.tools/v3/assert" ) // LogT is the subset of the testing.TB interface used by the daemon. type LogT interface { Logf(string, ...interface{}) } // nopLog is a no-op implementation of LogT that is used in daemons created by // NewDaemon (where no testing.TB is available). type nopLog struct{} func (nopLog) Logf(string, ...interface{}) {} const ( defaultDockerdBinary = "dockerd" defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock" defaultDockerdRootlessBinary = "dockerd-rootless.sh" defaultUnixSocket = "/var/run/docker.sock" defaultTLSHost = "localhost:2376" ) var errDaemonNotStarted = errors.New("daemon not started") // SockRoot holds the path of the default docker integration daemon socket var SockRoot = filepath.Join(os.TempDir(), "docker-integration") type clientConfig struct { transport *http.Transport scheme string addr string } // Daemon represents a Docker daemon for the testing framework type Daemon struct { Root string Folder string Wait chan error UseDefaultHost bool UseDefaultTLSHost bool id string logFile *os.File cmd *exec.Cmd storageDriver string userlandProxy bool defaultCgroupNamespaceMode string execRoot string experimental bool init bool dockerdBinary string log LogT pidFile string args []string containerdSocket string rootlessUser *user.User rootlessXDGRuntimeDir string // swarm related field swarmListenAddr string SwarmPort int // FIXME(vdemeester) should probably not be exported DefaultAddrPool []string SubnetSize uint32 DataPathPort uint32 OOMScoreAdjust int // cached information CachedInfo types.Info } // NewDaemon returns a Daemon instance to be used for testing. // The daemon will not automatically start. // The daemon will modify and create files under workingDir. 
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) { storageDriver := os.Getenv("DOCKER_GRAPHDRIVER") if err := os.MkdirAll(SockRoot, 0700); err != nil { return nil, errors.Wrapf(err, "failed to create daemon socket root %q", SockRoot) } id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) dir := filepath.Join(workingDir, id) daemonFolder, err := filepath.Abs(dir) if err != nil { return nil, err } daemonRoot := filepath.Join(daemonFolder, "root") if err := os.MkdirAll(daemonRoot, 0755); err != nil { return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot) } userlandProxy := true if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err != nil { userlandProxy = val } } d := &Daemon{ id: id, Folder: daemonFolder, Root: daemonRoot, storageDriver: storageDriver, userlandProxy: userlandProxy, // dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation) execRoot: filepath.Join(os.TempDir(), "dxr", id), dockerdBinary: defaultDockerdBinary, swarmListenAddr: defaultSwarmListenAddr, SwarmPort: DefaultSwarmPort, log: nopLog{}, containerdSocket: defaultContainerdSocket, } for _, op := range ops { op(d) } if d.rootlessUser != nil { if err := os.Chmod(SockRoot, 0777); err != nil { return nil, err } uid, err := strconv.Atoi(d.rootlessUser.Uid) if err != nil { return nil, err } gid, err := strconv.Atoi(d.rootlessUser.Gid) if err != nil { return nil, err } if err := os.Chown(d.Folder, uid, gid); err != nil { return nil, err } if err := os.Chown(d.Root, uid, gid); err != nil { return nil, err } if err := os.MkdirAll(filepath.Dir(d.execRoot), 0700); err != nil { return nil, err } if err := os.Chown(filepath.Dir(d.execRoot), uid, gid); err != nil { return nil, err } if err := os.MkdirAll(d.execRoot, 0700); err != nil { return nil, err } if err := os.Chown(d.execRoot, uid, gid); err != nil { return nil, err } d.rootlessXDGRuntimeDir = filepath.Join(d.Folder, "xdgrun") if err := os.MkdirAll(d.rootlessXDGRuntimeDir, 0700); err != nil { return nil, err } if err := os.Chown(d.rootlessXDGRuntimeDir, uid, gid); err != nil { return nil, err } d.containerdSocket = "" } return d, nil } // New returns a Daemon instance to be used for testing. // This will create a directory such as d123456789 in the folder specified by // $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. // The daemon will not automatically start. func New(t testing.TB, ops ...Option) *Daemon { t.Helper() dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") if dest == "" { dest = os.Getenv("DEST") } dest = filepath.Join(dest, t.Name()) assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable") if os.Getenv("DOCKER_ROOTLESS") != "" { if os.Getenv("DOCKER_REMAP_ROOT") != "" { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_REMAP_ROOT currently") } if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err == nil && !val { t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_USERLANDPROXY=false") } } ops = append(ops, WithRootlessUser("unprivilegeduser")) } ops = append(ops, WithOOMScoreAdjust(-500)) d, err := NewDaemon(dest, ops...) assert.NilError(t, err, "could not create daemon at %q", dest) if d.rootlessUser != nil && d.dockerdBinary != defaultDockerdBinary { t.Skipf("DOCKER_ROOTLESS doesn't support specifying non-default dockerd binary path %q", d.dockerdBinary) } return d } // BinaryPath returns the binary and its arguments. 
func (d *Daemon) BinaryPath() (string, error) { dockerdBinary, err := exec.LookPath(d.dockerdBinary) if err != nil { return "", errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) } return dockerdBinary, nil } // ContainersNamespace returns the containerd namespace used for containers. func (d *Daemon) ContainersNamespace() string { return d.id } // RootDir returns the root directory of the daemon. func (d *Daemon) RootDir() string { return d.Root } // ID returns the generated id of the daemon func (d *Daemon) ID() string { return d.id } // StorageDriver returns the configured storage driver of the daemon func (d *Daemon) StorageDriver() string { return d.storageDriver } // Sock returns the socket path of the daemon func (d *Daemon) Sock() string { return fmt.Sprintf("unix://" + d.sockPath()) } func (d *Daemon) sockPath() string { return filepath.Join(SockRoot, d.id+".sock") } // LogFileName returns the path the daemon's log file func (d *Daemon) LogFileName() string { return d.logFile.Name() } // ReadLogFile returns the content of the daemon log file func (d *Daemon) ReadLogFile() ([]byte, error) { _ = d.logFile.Sync() return os.ReadFile(d.logFile.Name()) } // NewClientT creates new client based on daemon's socket path func (d *Daemon) NewClientT(t testing.TB, extraOpts ...client.Opt) *client.Client { t.Helper() c, err := d.NewClient(extraOpts...) assert.NilError(t, err, "[%s] could not create daemon client", d.id) return c } // NewClient creates new client based on daemon's socket path func (d *Daemon) NewClient(extraOpts ...client.Opt) (*client.Client, error) { clientOpts := []client.Opt{ client.FromEnv, client.WithHost(d.Sock()), } clientOpts = append(clientOpts, extraOpts...) return client.NewClientWithOpts(clientOpts...) } // Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files func (d *Daemon) Cleanup(t testing.TB) { t.Helper() cleanupMount(t, d) cleanupRaftDir(t, d) cleanupNetworkNamespace(t, d) } // Start starts the daemon and return once it is ready to receive requests. func (d *Daemon) Start(t testing.TB, args ...string) { t.Helper() if err := d.StartWithError(args...); err != nil { d.DumpStackAndQuit() // in case the daemon is stuck t.Fatalf("[%s] failed to start daemon with arguments %v : %v", d.id, d.args, err) } } // StartWithError starts the daemon and return once it is ready to receive requests. // It returns an error in case it couldn't start. func (d *Daemon) StartWithError(args ...string) error { logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { return errors.Wrapf(err, "[%s] failed to create logfile", d.id) } return d.StartWithLogFile(logFile, args...) } // StartWithLogFile will start the daemon and attach its streams to a given file. 
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { d.handleUserns() dockerdBinary, err := d.BinaryPath() if err != nil { return err } if d.pidFile == "" { d.pidFile = filepath.Join(d.Folder, "docker.pid") } d.args = []string{} if d.rootlessUser != nil { if d.dockerdBinary != defaultDockerdBinary { return errors.Errorf("[%s] DOCKER_ROOTLESS doesn't support non-default dockerd binary path %q", d.id, d.dockerdBinary) } dockerdBinary = "sudo" d.args = append(d.args, "-u", d.rootlessUser.Username, "-E", "XDG_RUNTIME_DIR="+d.rootlessXDGRuntimeDir, "-E", "HOME="+d.rootlessUser.HomeDir, "-E", "PATH="+os.Getenv("PATH"), "--", defaultDockerdRootlessBinary, ) } d.args = append(d.args, "--data-root", d.Root, "--exec-root", d.execRoot, "--pidfile", d.pidFile, fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), "--containerd-namespace", d.id, "--containerd-plugins-namespace", d.id+"p", ) if d.containerdSocket != "" { d.args = append(d.args, "--containerd", d.containerdSocket) } if d.defaultCgroupNamespaceMode != "" { d.args = append(d.args, "--default-cgroupns-mode", d.defaultCgroupNamespaceMode) } if d.experimental { d.args = append(d.args, "--experimental") } if d.init { d.args = append(d.args, "--init") } if !(d.UseDefaultHost || d.UseDefaultTLSHost) { d.args = append(d.args, "--host", d.Sock()) } if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.args = append(d.args, "--userns-remap", root) } // If we don't explicitly set the log-level or debug flag(-D) then // turn on debug mode foundLog := false foundSd := false for _, a := range providedArgs { if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { foundLog = true } if strings.Contains(a, "--storage-driver") { foundSd = true } } if !foundLog { d.args = append(d.args, "--debug") } if d.storageDriver != "" && !foundSd { d.args = append(d.args, "--storage-driver", d.storageDriver) } d.args = append(d.args, providedArgs...) d.cmd = exec.Command(dockerdBinary, d.args...) d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") d.cmd.Stdout = out d.cmd.Stderr = out d.logFile = out if d.rootlessUser != nil { // sudo requires this for propagating signals setsid(d.cmd) } if err := d.cmd.Start(); err != nil { return errors.Wrapf(err, "[%s] could not start daemon container", d.id) } wait := make(chan error, 1) go func() { ret := d.cmd.Wait() d.log.Logf("[%s] exiting daemon", d.id) // If we send before logging, we might accidentally log _after_ the test is done. // As of Go 1.12, this incurs a panic instead of silently being dropped. 
wait <- ret close(wait) }() d.Wait = wait clientConfig, err := d.getClientConfig() if err != nil { return err } client := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/_ping", nil) if err != nil { return errors.Wrapf(err, "[%s] could not create new request", d.id) } req.URL.Host = clientConfig.addr req.URL.Scheme = clientConfig.scheme ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() // make sure daemon is ready to receive requests for i := 0; ; i++ { d.log.Logf("[%s] waiting for daemon to start", d.id) select { case <-ctx.Done(): return errors.Wrapf(ctx.Err(), "[%s] daemon exited and never started", d.id) case err := <-d.Wait: return errors.Wrapf(err, "[%s] daemon exited during startup", d.id) default: rctx, rcancel := context.WithTimeout(context.TODO(), 2*time.Second) defer rcancel() resp, err := client.Do(req.WithContext(rctx)) if err != nil { if i > 2 { // don't log the first couple, this ends up just being noise d.log.Logf("[%s] error pinging daemon on start: %v", d.id, err) } select { case <-ctx.Done(): case <-time.After(500 * time.Millisecond): } continue } resp.Body.Close() if resp.StatusCode != http.StatusOK { d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) } d.log.Logf("[%s] daemon started\n", d.id) d.Root, err = d.queryRootDir() if err != nil { return errors.Wrapf(err, "[%s] error querying daemon for root directory", d.id) } return nil } } } // StartWithBusybox will first start the daemon with Daemon.Start() // then save the busybox image from the main daemon and load it into this Daemon instance. func (d *Daemon) StartWithBusybox(t testing.TB, arg ...string) { t.Helper() d.Start(t, arg...) d.LoadBusybox(t) } // Kill will send a SIGKILL to the daemon func (d *Daemon) Kill() error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { d.logFile.Close() d.cmd = nil }() if err := d.cmd.Process.Kill(); err != nil { return err } if d.pidFile != "" { _ = os.Remove(d.pidFile) } return nil } // Pid returns the pid of the daemon func (d *Daemon) Pid() int { return d.cmd.Process.Pid } // Interrupt stops the daemon by sending it an Interrupt signal func (d *Daemon) Interrupt() error { return d.Signal(os.Interrupt) } // Signal sends the specified signal to the daemon if running func (d *Daemon) Signal(signal os.Signal) error { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } return d.cmd.Process.Signal(signal) } // DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its // stack to its log file and exit // This is used primarily for gathering debug information on test timeout func (d *Daemon) DumpStackAndQuit() { if d.cmd == nil || d.cmd.Process == nil { return } SignalDaemonDump(d.cmd.Process.Pid) } // Stop will send a SIGINT every second and wait for the daemon to stop. // If it times out, a SIGKILL is sent. // Stop will not delete the daemon directory. If a purged daemon is needed, // instantiate a new one with NewDaemon. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Stop(t testing.TB) { t.Helper() err := d.StopWithError() if err != nil { if err != errDaemonNotStarted { t.Fatalf("[%s] error while stopping the daemon: %v", d.id, err) } else { t.Logf("[%s] daemon is not started", d.id) } } } // StopWithError will send a SIGINT every second and wait for the daemon to stop. // If it timeouts, a SIGKILL is sent. // Stop will not delete the daemon directory. 
If a purged daemon is needed, // instantiate a new one with NewDaemon. func (d *Daemon) StopWithError() (err error) { if d.cmd == nil || d.Wait == nil { return errDaemonNotStarted } defer func() { if err != nil { d.log.Logf("[%s] error while stopping daemon: %v", d.id, err) } else { d.log.Logf("[%s] daemon stopped", d.id) if d.pidFile != "" { _ = os.Remove(d.pidFile) } } if err := d.logFile.Close(); err != nil { d.log.Logf("[%s] failed to close daemon logfile: %v", d.id, err) } d.cmd = nil }() i := 1 ticker := time.NewTicker(time.Second) defer ticker.Stop() tick := ticker.C d.log.Logf("[%s] stopping daemon", d.id) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { if strings.Contains(err.Error(), "os: process already finished") { return errDaemonNotStarted } return errors.Wrapf(err, "[%s] could not send signal", d.id) } out1: for { select { case err := <-d.Wait: return err case <-time.After(20 * time.Second): // time for stopping jobs and run onShutdown hooks d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id) break out1 } } out2: for { select { case err := <-d.Wait: return err case <-tick: i++ if i > 5 { d.log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i) break out2 } d.log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i) } } } if err := d.cmd.Process.Kill(); err != nil { d.log.Logf("[%s] failed to kill daemon: %v", d.id, err) return err } return nil } // Restart will restart the daemon by first stopping it and the starting it. // If an error occurs while starting the daemon, the test will fail. func (d *Daemon) Restart(t testing.TB, args ...string) { t.Helper() d.Stop(t) d.Start(t, args...) } // RestartWithError will restart the daemon by first stopping it and then starting it. func (d *Daemon) RestartWithError(arg ...string) error { if err := d.StopWithError(); err != nil { return err } return d.StartWithError(arg...) 
} func (d *Daemon) handleUserns() { // in the case of tests running a user namespace-enabled daemon, we have resolved // d.Root to be the actual final path of the graph dir after the "uid.gid" of // remapped root is added--we need to subtract it from the path before calling // start or else we will continue making subdirectories rather than truly restarting // with the same location/root: if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.Root = filepath.Dir(d.Root) } } // ReloadConfig asks the daemon to reload its configuration func (d *Daemon) ReloadConfig() error { if d.cmd == nil || d.cmd.Process == nil { return errors.New("daemon is not running") } errCh := make(chan error, 1) started := make(chan struct{}) go func() { _, body, err := request.Get("/events", request.Host(d.Sock())) close(started) if err != nil { errCh <- err return } defer body.Close() dec := json.NewDecoder(body) for { var e events.Message if err := dec.Decode(&e); err != nil { errCh <- err return } if e.Type != events.DaemonEventType { continue } if e.Action != "reload" { continue } close(errCh) // notify that we are done return } }() <-started if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { return errors.Wrapf(err, "[%s] error signaling daemon reload", d.id) } select { case err := <-errCh: if err != nil { return errors.Wrapf(err, "[%s] error waiting for daemon reload event", d.id) } case <-time.After(30 * time.Second): return errors.Errorf("[%s] daemon reload event timed out after 30 seconds", d.id) } return nil } // LoadBusybox image into the daemon func (d *Daemon) LoadBusybox(t testing.TB) { t.Helper() clientHost, err := client.NewClientWithOpts(client.FromEnv) assert.NilError(t, err, "[%s] failed to create client", d.id) defer clientHost.Close() ctx := context.Background() reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) assert.NilError(t, err, "[%s] failed to download busybox", d.id) defer reader.Close() c := d.NewClientT(t) defer c.Close() resp, err := c.ImageLoad(ctx, reader, true) assert.NilError(t, err, "[%s] failed to load busybox", d.id) defer resp.Body.Close() } func (d *Daemon) getClientConfig() (*clientConfig, error) { var ( transport *http.Transport scheme string addr string proto string ) if d.UseDefaultTLSHost { option := &tlsconfig.Options{ CAFile: "fixtures/https/ca.pem", CertFile: "fixtures/https/client-cert.pem", KeyFile: "fixtures/https/client-key.pem", } tlsConfig, err := tlsconfig.Client(*option) if err != nil { return nil, err } transport = &http.Transport{ TLSClientConfig: tlsConfig, } addr = defaultTLSHost scheme = "https" proto = "tcp" } else if d.UseDefaultHost { addr = defaultUnixSocket proto = "unix" scheme = "http" transport = &http.Transport{} } else { addr = d.sockPath() proto = "unix" scheme = "http" transport = &http.Transport{} } if err := sockets.ConfigureTransport(transport, proto, addr); err != nil { return nil, err } transport.DisableKeepAlives = true if proto == "unix" { addr = filepath.Base(addr) } return &clientConfig{ transport: transport, scheme: scheme, addr: addr, }, nil } func (d *Daemon) queryRootDir() (string, error) { // update daemon root by asking /info endpoint (to support user // namespaced daemon with root remapped uid.gid directory) clientConfig, err := d.getClientConfig() if err != nil { return "", err } c := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest(http.MethodGet, "/info", nil) if err != nil { return "", err } req.Header.Set("Content-Type", "application/json") req.URL.Host = 
clientConfig.addr req.URL.Scheme = clientConfig.scheme resp, err := c.Do(req) if err != nil { return "", err } body := ioutils.NewReadCloserWrapper(resp.Body, func() error { return resp.Body.Close() }) type Info struct { DockerRootDir string } var b []byte var i Info b, err = request.ReadBody(body) if err == nil && resp.StatusCode == http.StatusOK { // read the docker root dir if err = json.Unmarshal(b, &i); err == nil { return i.DockerRootDir, nil } } return "", err } // Info returns the info struct for this daemon func (d *Daemon) Info(t testing.TB) types.Info { t.Helper() c := d.NewClientT(t) info, err := c.Info(context.Background()) assert.NilError(t, err) assert.NilError(t, c.Close()) return info } // cleanupRaftDir removes swarmkit wal files if present func cleanupRaftDir(t testing.TB, d *Daemon) { t.Helper() for _, p := range []string{"wal", "wal-v3-encrypted", "snap-v3-encrypted"} { dir := filepath.Join(d.Root, "swarm/raft", p) if err := os.RemoveAll(dir); err != nil { t.Logf("[%s] error removing %v: %v", d.id, dir, err) } } }
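For orientation, here is a hypothetical usage sketch (not taken from the repository) showing how an integration test typically drives the `Daemon` helper defined above; the test name and the assertion on `ServerVersion` are illustrative only.

```go
package example_test // hypothetical test package

import (
	"context"
	"testing"

	"github.com/docker/docker/testutil/daemon"
	"gotest.tools/v3/assert"
)

// TestPrivateDaemon spins up an isolated dockerd for the test, loads busybox
// into it from the host daemon, talks to it over its private socket, and
// stops it when done.
func TestPrivateDaemon(t *testing.T) {
	d := daemon.New(t) // requires DOCKER_INTEGRATION_DAEMON_DEST or DEST to be set
	d.StartWithBusybox(t)
	defer d.Stop(t)

	c := d.NewClientT(t)
	defer c.Close()

	info, err := c.Info(context.Background())
	assert.NilError(t, err)
	assert.Check(t, info.ServerVersion != "")
}
```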
thaJeztah
02016803f02e8a0145e7706cfbd21c8b59e3c28d
b64b9811c3adb69641e2356fd12e2077d1f34ae1
The failure that was reported was in `d9ac12b0678f4`, but it also has the string, but I do see it ran into another failure with the libnetwork socket, but that looks to be in the shutdown cycle; ``` time="2021-09-21T12:55:04.290097255Z" level=info msg="API listen on /tmp/docker-integration/d9ac12b0678f4.sock" time="2021-09-21T12:55:04.414946112Z" level=debug msg="Calling GET /_ping" time="2021-09-21T12:55:04.415317278Z" level=debug msg="Calling GET /info" time="2021-09-21T12:55:04.430754586Z" level=info msg="Got signal to reload configuration, reloading from: /etc/docker/daemon.json" time="2021-09-21T12:55:04.431178648Z" level=debug msg="Reset Max Concurrent Downloads: 3" time="2021-09-21T12:55:04.431201851Z" level=debug msg="Reset Max Concurrent Uploads: 5" time="2021-09-21T12:55:04.431221426Z" level=debug msg="Reset Max Download Attempts: 5" time="2021-09-21T12:55:04.432309643Z" level=info msg="Reloaded configuration: {\"storage-driver\":\"overlay2\",\"mtu\":1500,\"pidfile\":\"/go/src/github.com/docker/docker/bundles/test-integration-flaky/TestDaemonProxy/reload_sanitized/d9ac12b0678f4/docker.pid\",\"data-root\":\"/go/src/github.com/docker/docker/bundles/test-integration-flaky/TestDaemonProxy/reload_sanitized/d9ac12b0678f4/root\",\"exec-root\":\"/tmp/dxr/d9ac12b0678f4\",\"group\":\"docker\",\"deprecated-key-path\":\"/etc/docker/key.json\",\"max-concurrent-downloads\":3,\"max-concurrent-uploads\":5,\"max-download-attempts\":5,\"shutdown-timeout\":15,\"debug\":true,\"hosts\":[\"unix:///tmp/docker-integration/d9ac12b0678f4.sock\"],\"log-level\":\"info\",\"swarm-default-advertise-addr\":\"\",\"swarm-raft-heartbeat-tick\":0,\"swarm-raft-election-tick\":0,\"metrics-addr\":\"\",\"host-gateway-ip\":\"172.18.0.1\",\"log-driver\":\"json-file\",\"ip\":\"0.0.0.0\",\"icc\":true,\"iptables\":true,\"ip-forward\":true,\"ip-masq\":true,\"userland-proxy\":true,\"default-address-pools\":{\"Values\":null},\"network-control-plane-mtu\":1500,\"experimental\":false,\"containerd\":\"/var/run/docker/containerd/containerd.sock\",\"builder\":{\"GC\":{},\"Entitlements\":{}},\"containerd-namespace\":\"d9ac12b0678f4\",\"containerd-plugin-namespace\":\"d9ac12b0678f4p\",\"runtimes\":{\"io.containerd.runc.v2\":{\"path\":\"runc\"},\"io.containerd.runtime.v1.linux\":{\"path\":\"runc\"},\"runc\":{\"path\":\"runc\"}},\"default-runtime\":\"runc\",\"seccomp-profile\":\"builtin\",\"default-shm-size\":67108864,\"default-ipc-mode\":\"private\",\"default-cgroupns-mode\":\"host\",\"resolv-conf\":\"/etc/resolv.conf\",\"http-proxy\":\"https://xxxxx:[email protected]\",\"https-proxy\":\"https://xxxxx:[email protected]\",\"no-proxy\":\"example.com\"}" time="2021-09-21T12:55:04.435328719Z" level=info msg="Processing signal 'interrupt'" time="2021-09-21T12:55:04.435439253Z" level=debug msg="daemon configured with a 15 seconds minimum shutdown timeout" time="2021-09-21T12:55:04.435514624Z" level=debug msg="start clean shutdown of all containers with a 15 seconds timeout..." time="2021-09-21T12:55:04.435590112Z" level=debug msg="found 0 orphan layers" time="2021-09-21T12:55:04.437848674Z" level=debug msg="Unix socket /tmp/dxr/d9ac12b0678f4/libnetwork/f7725651065a.sock doesn't exist. cannot accept client connections" time="2021-09-21T12:55:04.437938435Z" level=debug msg="Cleaning up old mountid : start." time="2021-09-21T12:55:04.439961024Z" level=debug msg="Cleaning up old mountid : done." 
time="2021-09-21T12:55:04.444201003Z" level=debug msg="Clean shutdown succeeded" time="2021-09-21T12:55:04.444236880Z" level=info msg="Daemon shutdown complete" ``` The same failure is appearing in all of them, it seems: ```bash grep -lr 'cannot accept client connections' . | wc -l 25 ``` Actually; perhaps even in all tests that shutdown the daemon (running within the `bundles` top level directory); ```bash grep -lr 'cannot accept client connections' . | wc -l 419 ```
thaJeztah
4,471
moby/moby
42,829
Test: wait for network changes in TestNetworkDBNodeJoinLeaveIteration
Signed-off-by: David Wang <[email protected]> fix #42698 In the network node change test, the expected behavior is judged by how many nodes are left in networkDB. Besides timing issues, things also get tricky for a leave-then-join sequence: if the check (counting the nodes) happens before the first "leave" event, the test case misses its target and reports PASS without verifying the final result; if the check happens after the 'leave' event but before the 'join' event, the test reports FAIL unnecessarily. This change checks both the db changes and the node count, and reports PASS only when networkDB has indeed changed and the node count matches the expected value.
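To make the approach described above concrete, below is a minimal sketch of "wait for a change, then check the count", using the `poll` helpers this test file already imports. The helper name `waitNetworkNodes`, its parameters, and the snapshot comparison are assumptions for illustration; the actual PR may detect the change differently (for example via serf's Lamport time, given the `serf` import it adds). An extra `reflect` import would also be needed for the comparison.

```go
// waitNetworkNodes is a hypothetical helper: first wait until db's membership
// list for nid differs from the snapshot taken before the leave/join (so the
// events really propagated), then wait until the list settles at the expected size.
func waitNetworkNodes(t *testing.T, db *NetworkDB, nid string, before []string, expected int) {
	t.Helper()

	// Phase 1: the database must actually have changed since the snapshot.
	changed := func(pt poll.LogT) poll.Result {
		db.RLock()
		current := append([]string(nil), db.networkNodes[nid]...)
		db.RUnlock()
		if !reflect.DeepEqual(current, before) {
			return poll.Success()
		}
		return poll.Continue("waiting for %s membership to change from %v", nid, before)
	}
	poll.WaitOn(t, changed, poll.WithDelay(50*time.Millisecond), poll.WithTimeout(10*time.Second))

	// Phase 2: only then require the node count to settle at the expected value.
	settled := func(pt poll.LogT) poll.Result {
		db.RLock()
		n := len(db.networkNodes[nid])
		db.RUnlock()
		if n == expected {
			return poll.Success()
		}
		return poll.Continue("waiting for %s to have %d nodes, currently %d", nid, expected, n)
	}
	poll.WaitOn(t, settled, poll.WithDelay(50*time.Millisecond), poll.WithTimeout(10*time.Second))
}
```

A test would snapshot `dbs[0].networkNodes["network1"]` before the quick leave/join and call this helper afterwards, instead of asserting the slice length at an arbitrary instant.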
null
2021-09-08 03:46:14+00:00
2022-07-22 04:55:12+00:00
libnetwork/networkdb/networkdb_test.go
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) 
assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 for i := 1; i <= n; i++ { err = 
dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 
5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) // Single node Join/Leave err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 1 { t.Fatalf("The networkNodes list has to have be 1 instead of %d", len(dbs[0].networkNodes["network1"])) } err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 0 { t.Fatalf("The networkNodes list has to have be 0 instead of %d", len(dbs[0].networkNodes["network1"])) } // Multiple nodes Join/Leave err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead 
of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: 
memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: 
net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/hashicorp/serf/serf" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, 
is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 
for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func 
TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) var ( dbIndex int32 staleNetworkTime [2]serf.LamportTime expectNodeCount int network = "network1" ) dbChangeWitness := func(t poll.LogT) poll.Result { db := dbs[dbIndex] networkTime := db.networkClock.Time() if networkTime <= staleNetworkTime[dbIndex] { return poll.Continue("network time is stale, no change registered yet.") } count := -1 db.Lock() if nodes, ok := db.networkNodes[network]; ok { count = len(nodes) } db.Unlock() if count != expectNodeCount { return poll.Continue("current number of nodes is %d, expect %d.", count, expectNodeCount) } return poll.Success() } // Single node Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 1 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 0 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) // Multiple nodes Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), 
poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) 
assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, 
is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
zq-david-wang
d515863abcf456dfd5a1c979a3432ba6763dbad3
49f021ebf00a76d74f5ce158244083e2dfba26fb
Does this map access need to be done in a critical section? I see there's an `RWMutex` embedded in the `NetworkDB` struct, and application code only accesses the `networkNodes` map while holding the lock.
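(For context on the locking question above, here is a minimal, self-contained Go sketch of the pattern being discussed — not the libnetwork code itself. It shows a struct embedding a sync.RWMutex that guards a networkNodes map, with reads taking the read lock before touching the map; the type and field names are stand-ins chosen only to mirror NetworkDB.)

```go
package main

import (
	"fmt"
	"sync"
)

// fakeDB is a stand-in for NetworkDB: it embeds an RWMutex that guards the
// networkNodes map (network ID -> node IDs).
type fakeDB struct {
	sync.RWMutex
	networkNodes map[string][]string
}

// nodeCount reads networkNodes only while holding the read lock, which is the
// pattern the review comment says application code follows.
func (db *fakeDB) nodeCount(network string) int {
	db.RLock()
	defer db.RUnlock()
	return len(db.networkNodes[network])
}

func main() {
	db := &fakeDB{networkNodes: map[string][]string{"network1": {"node1", "node2"}}}
	fmt.Println(db.nodeCount("network1")) // prints 2
}
```

The merged check function likewise takes the NetworkDB lock around its count of networkNodes before comparing it to the expected value.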
corhere
4,472
moby/moby
42,829
Test: wait for network changes in TestNetworkDBNodeJoinLeaveIteration
Signed-off-by: David Wang <[email protected]> fix #42698 In the network node change test, the expected behavior is verified by counting how many nodes remain in networkDB. Beyond ordinary timing issues, a leave-then-join sequence is tricky: if the check (counting the nodes) runs before the first "leave" event is processed, the test misses its target and reports PASS without ever verifying the final state; if the check runs after the 'leave' event but before the 'join' event, the test reports FAIL unnecessarily. This change checks both that the db has actually changed and the node count, and only reports PASS once networkDB has registered a change and the node count is the expected one
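(A simplified, hypothetical sketch of the approach described above, using the gotest.tools poll helper the test suite already imports: the check keeps polling until a change has actually been registered — a stand-in clock has advanced past its stale value — and the observed count equals the expectation. The names and values are illustrative, not the real NetworkDB fields, and the function would live in a _test.go file.)

```go
package sketch

import (
	"sync/atomic"
	"testing"
	"time"

	"gotest.tools/v3/poll"
)

// TestWaitForChange only reports success once a change has been observed
// (the clock moved past its stale value) AND the count matches, so a check
// that fires before the leave/join events are processed simply keeps polling
// instead of passing or failing prematurely.
func TestWaitForChange(t *testing.T) {
	var clock, count int64 // stand-ins for the network clock and node count
	staleClock := atomic.LoadInt64(&clock)
	expect := int64(2)

	// Simulate the events arriving a little later, as gossip would deliver them.
	go func() {
		time.Sleep(50 * time.Millisecond)
		atomic.AddInt64(&clock, 1)
		atomic.StoreInt64(&count, expect)
	}()

	check := func(poll.LogT) poll.Result {
		if atomic.LoadInt64(&clock) <= staleClock {
			return poll.Continue("no change registered yet")
		}
		if got := atomic.LoadInt64(&count); got != expect {
			return poll.Continue("current count is %d, expect %d", got, expect)
		}
		return poll.Success()
	}
	poll.WaitOn(t, check, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond))
}
```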
null
2021-09-08 03:46:14+00:00
2022-07-22 04:55:12+00:00
libnetwork/networkdb/networkdb_test.go
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) 
assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 for i := 1; i <= n; i++ { err = 
dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 
5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) // Single node Join/Leave err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 1 { t.Fatalf("The networkNodes list has to have be 1 instead of %d", len(dbs[0].networkNodes["network1"])) } err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 0 { t.Fatalf("The networkNodes list has to have be 0 instead of %d", len(dbs[0].networkNodes["network1"])) } // Multiple nodes Join/Leave err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead 
of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: 
memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: 
net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/hashicorp/serf/serf" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, 
is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 
for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func 
TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) var ( dbIndex int32 staleNetworkTime [2]serf.LamportTime expectNodeCount int network = "network1" ) dbChangeWitness := func(t poll.LogT) poll.Result { db := dbs[dbIndex] networkTime := db.networkClock.Time() if networkTime <= staleNetworkTime[dbIndex] { return poll.Continue("network time is stale, no change registered yet.") } count := -1 db.Lock() if nodes, ok := db.networkNodes[network]; ok { count = len(nodes) } db.Unlock() if count != expectNodeCount { return poll.Continue("current number of nodes is %d, expect %d.", count, expectNodeCount) } return poll.Success() } // Single node Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 1 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 0 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) // Multiple nodes Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), 
poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) 
assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, 
is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
zq-david-wang
d515863abcf456dfd5a1c979a3432ba6763dbad3
49f021ebf00a76d74f5ce158244083e2dfba26fb
```suggestion
func (db *NetworkDB) witnessNetworkNodeChanges(t *testing.T, ntime serf.LamportTime, network string, count int) bool {
	t.Helper()
```
corhere
4,473
moby/moby
42,829
Test: wait for network changes in TestNetworkDBNodeJoinLeaveIteration
Signed-off-by: David Wang <[email protected]> fix #42698 In the network node change test, the expected behavior is measured by how many nodes remain in networkDB. Besides timing issues, things also get tricky for a leave-then-join sequence: if the check (counting the nodes) happens before the first "leave" event, the test case misses its target and reports PASS without verifying the final result; if the check happens after the "leave" event but before the "join" event, the test reports FAIL unnecessarily. This change checks both that the db has actually changed and that the node count is as expected, reporting PASS only when both conditions hold.
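The approach described above is easiest to see as a small polling helper. The sketch below is illustrative only: the helper name `waitForNetworkNodeCount` is hypothetical, and it assumes the unexported `NetworkDB` fields (`networkClock`, `networkNodes`) and the gotest.tools `poll` package that the rest of this test file already relies on.

```go
// A minimal sketch, assuming access to NetworkDB internals used elsewhere
// in networkdb_test.go (networkClock, networkNodes) and gotest.tools/poll.
// waitForNetworkNodeCount polls until the network clock has advanced past
// the recorded stale time (i.e. a change was actually registered) AND the
// number of nodes known for the network matches the expectation.
func waitForNetworkNodeCount(t *testing.T, db *NetworkDB, stale serf.LamportTime, network string, want int) {
	t.Helper()
	check := func(poll.LogT) poll.Result {
		// Without this guard, a check that runs before the first event is
		// processed could count the old node set and pass spuriously.
		if db.networkClock.Time() <= stale {
			return poll.Continue("network time is stale, no change registered yet")
		}
		db.Lock()
		got := len(db.networkNodes[network])
		db.Unlock()
		if got != want {
			return poll.Continue("have %d nodes, want %d", got, want)
		}
		return poll.Success()
	}
	poll.WaitOn(t, check, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond))
}
```

A test would record the clock before triggering the operation, e.g. `stale := dbs[0].networkClock.Time()`, perform the join or leave, and then call `waitForNetworkNodeCount(t, dbs[0], stale, "network1", 1)`, which is essentially what the `dbChangeWitness` closure in the updated test does.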
null
2021-09-08 03:46:14+00:00
2022-07-22 04:55:12+00:00
libnetwork/networkdb/networkdb_test.go
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) 
assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 for i := 1; i <= n; i++ { err = 
dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 
5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) // Single node Join/Leave err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 1 { t.Fatalf("The networkNodes list has to have be 1 instead of %d", len(dbs[0].networkNodes["network1"])) } err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 0 { t.Fatalf("The networkNodes list has to have be 0 instead of %d", len(dbs[0].networkNodes["network1"])) } // Multiple nodes Join/Leave err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead 
of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: 
memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: 
net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/hashicorp/serf/serf" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, 
is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 
for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func 
TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) var ( dbIndex int32 staleNetworkTime [2]serf.LamportTime expectNodeCount int network = "network1" ) dbChangeWitness := func(t poll.LogT) poll.Result { db := dbs[dbIndex] networkTime := db.networkClock.Time() if networkTime <= staleNetworkTime[dbIndex] { return poll.Continue("network time is stale, no change registered yet.") } count := -1 db.Lock() if nodes, ok := db.networkNodes[network]; ok { count = len(nodes) } db.Unlock() if count != expectNodeCount { return poll.Continue("current number of nodes is %d, expect %d.", count, expectNodeCount) } return poll.Success() } // Single node Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 1 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 0 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) // Multiple nodes Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), 
poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) 
assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, 
is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
zq-david-wang
d515863abcf456dfd5a1c979a3432ba6763dbad3
49f021ebf00a76d74f5ce158244083e2dfba26fb
Have you considered using [`poll.WaitOn`](https://pkg.go.dev/gotest.tools/[email protected]/poll#WaitOn) instead of hand-rolling a polling loop?
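The suggestion refers to `poll.WaitOn` from `gotest.tools/v3/poll`, which the tests above already use for cluster setup. Below is a minimal sketch of how a hand-rolled sleep-and-retry loop such as `verifyNetworkExistence` could be expressed with it; the helper name `pollNetworkExistence` and the timeout values are illustrative assumptions, not code from the PR.

```go
// Sketch only (assumed helper name and timeouts): replacing a manual
// sleep-and-retry loop with poll.WaitOn from gotest.tools/v3/poll.
func (db *NetworkDB) pollNetworkExistence(t *testing.T, node, id string, present bool) {
	t.Helper()
	check := func(poll.LogT) poll.Result {
		db.RLock()
		defer db.RUnlock()
		nn, ok := db.networks[node]
		if !ok {
			if !present {
				return poll.Success()
			}
			return poll.Continue("node %s has no networks yet", node)
		}
		n, ok := nn[id]
		if present && ok {
			return poll.Success()
		}
		if !present && (!ok || n.leaving) {
			return poll.Success()
		}
		return poll.Continue("network %s on node %s: present=%v, want present=%v", id, node, ok, present)
	}
	poll.WaitOn(t, check, poll.WithDelay(50*time.Millisecond), poll.WithTimeout(4*time.Second))
}
```

When the timeout elapses, poll.WaitOn fails the test with the last Continue message, so the manual retry counter and the final t.Error call are no longer needed.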
corhere
4,474
moby/moby
42,829
Test: wait for network changes in TestNetworkDBNodeJoinLeaveIteration
Signed-off-by: David Wang <[email protected]> fix #42698 In the network node change test, the expected behavior is measured by how many nodes remain in networkDB. Beyond plain timing issues, a leave-then-join sequence is especially tricky: if the check (counting the nodes) runs before the first "leave" event has been processed, the test case misses its target and reports PASS without verifying the final result; if the check runs after the 'leave' event but before the 'join' event, the test reports FAIL unnecessarily. This change checks both that the db has changed and the node count, and reports PASS only when networkDB has indeed changed and the node count matches the expectation.
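A condensed sketch of the approach the description outlines, mirroring the `dbChangeWitness` helper in the updated test content below; the wrapper name `waitForNodeCount` and its parameter list are assumptions for illustration, and it relies on the `github.com/hashicorp/serf/serf` import added in the updated file.

```go
// Poll until (a) the network's Lamport clock has moved past the value taken
// before the Join/Leave call, proving an event was actually processed, and
// (b) the node count matches the expectation. This avoids a false PASS on a
// stale read and a false FAIL between a leave and the following join.
waitForNodeCount := func(t *testing.T, db *NetworkDB, network string, stale serf.LamportTime, want int) {
	poll.WaitOn(t, func(poll.LogT) poll.Result {
		if db.networkClock.Time() <= stale {
			return poll.Continue("network time is stale, no change registered yet")
		}
		db.Lock()
		count := len(db.networkNodes[network])
		db.Unlock()
		if count != want {
			return poll.Continue("have %d nodes in %s, want %d", count, network, want)
		}
		return poll.Success()
	}, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond))
}
```

Typical use would be to record the clock value before the Join or Leave call and then, for example, `waitForNodeCount(t, dbs[0], "network1", staleTime, 2)` after both nodes have joined.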
null
2021-09-08 03:46:14+00:00
2022-07-22 04:55:12+00:00
libnetwork/networkdb/networkdb_test.go
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) 
assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 for i := 1; i <= n; i++ { err = 
dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 
5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) // Single node Join/Leave err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 1 { t.Fatalf("The networkNodes list has to have be 1 instead of %d", len(dbs[0].networkNodes["network1"])) } err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 0 { t.Fatalf("The networkNodes list has to have be 0 instead of %d", len(dbs[0].networkNodes["network1"])) } // Multiple nodes Join/Leave err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead 
of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: 
memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: 
net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/hashicorp/serf/serf" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, 
is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 
for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func 
TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) var ( dbIndex int32 staleNetworkTime [2]serf.LamportTime expectNodeCount int network = "network1" ) dbChangeWitness := func(t poll.LogT) poll.Result { db := dbs[dbIndex] networkTime := db.networkClock.Time() if networkTime <= staleNetworkTime[dbIndex] { return poll.Continue("network time is stale, no change registered yet.") } count := -1 db.Lock() if nodes, ok := db.networkNodes[network]; ok { count = len(nodes) } db.Unlock() if count != expectNodeCount { return poll.Continue("current number of nodes is %d, expect %d.", count, expectNodeCount) } return poll.Success() } // Single node Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 1 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 0 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) // Multiple nodes Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), 
poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) 
assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, 
is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
zq-david-wang
d515863abcf456dfd5a1c979a3432ba6763dbad3
49f021ebf00a76d74f5ce158244083e2dfba26fb
Those variables are functionally arguments to the `dbChangeWitness` poll check function. It would be a lot clearer if they were actually passed in as function arguments rather than by mutating variables. ```suggestion dbChangeWitness := func( dbIndex int32, staleNetworkTime [2]serf.LamportTime, expectNodeCount int, network string, ) poll.Check { return func(t poll.LogT) poll.Result { ```
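A minimal, self-contained sketch of the closure-factory pattern the review suggests, assuming `gotest.tools/v3/poll`. The `counter` type and `newNodeCountCheck` helper below are hypothetical stand-ins for `NetworkDB` and its `networkNodes` map, not part of the repository; they only illustrate how the parameters would be bound into the returned `poll.Check` instead of being communicated through mutated shared variables.

```go
package example

import (
	"sync"
	"testing"
	"time"

	"gotest.tools/v3/poll"
)

// counter is a hypothetical stand-in for NetworkDB: it tracks node IDs per network.
type counter struct {
	mu    sync.Mutex
	nodes map[string][]string // network -> node IDs
}

func (c *counter) count(network string) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return len(c.nodes[network])
}

// newNodeCountCheck binds its arguments into the returned poll.Check, so each
// call site reads poll.WaitOn(t, newNodeCountCheck(c, "network1", 2), ...)
// rather than assigning to shared variables before every WaitOn.
func newNodeCountCheck(c *counter, network string, want int) poll.Check {
	return func(t poll.LogT) poll.Result {
		if got := c.count(network); got != want {
			return poll.Continue("current number of nodes is %d, expect %d", got, want)
		}
		return poll.Success()
	}
}

// TestNodeCountCheck shows the call-site shape; in a real test the counter
// would be mutated by joins/leaves while WaitOn polls.
func TestNodeCountCheck(t *testing.T) {
	c := &counter{nodes: map[string][]string{"network1": {"a", "b"}}}
	poll.WaitOn(t, newNodeCountCheck(c, "network1", 2),
		poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond))
}
```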
corhere
4,475
moby/moby
42,829
Test: wait for network changes in TestNetworkDBNodeJoinLeaveIteration
Signed-off-by: David Wang <[email protected]> fixes #42698 In the network node change test, the expected behavior is focused on how many nodes are left in networkDB. Besides timing issues, things can also go wrong for a leave-then-join sequence: if the check (counting the nodes) happens before the first "leave" event, the test case misses its target and reports PASS without verifying the final result; if the check happens after the 'leave' event but before the 'join' event, the test reports FAIL unnecessarily. This code change checks both the db change and the node count; it reports PASS only when networkDB has indeed changed and the node count matches the expected value.
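A minimal sketch of the staleness guard described above, assuming `gotest.tools/v3/poll` and `github.com/hashicorp/serf/serf`. The `nodeCounter` interface is a hypothetical stand-in for `NetworkDB`: the check first waits for the network Lamport clock to move past the value recorded before the join/leave was issued, and only then compares the node count, so a count taken too early cannot yield a false PASS or FAIL.

```go
package example

import (
	"github.com/hashicorp/serf/serf"
	"gotest.tools/v3/poll"
)

// nodeCounter is a hypothetical stand-in for NetworkDB, exposing just the two
// pieces of state the check needs.
type nodeCounter interface {
	ClockTime() serf.LamportTime  // current network Lamport time
	NodeCount(network string) int // nodes currently known for the network
}

// changeWitness returns a poll.Check that reports success only after the
// network clock has advanced past the recorded stale time AND the node count
// matches the expectation.
func changeWitness(db nodeCounter, stale serf.LamportTime, network string, want int) poll.Check {
	return func(t poll.LogT) poll.Result {
		if db.ClockTime() <= stale {
			return poll.Continue("network time is stale, no change registered yet")
		}
		if got := db.NodeCount(network); got != want {
			return poll.Continue("current number of nodes is %d, expect %d", got, want)
		}
		return poll.Success()
	}
}
```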
null
2021-09-08 03:46:14+00:00
2022-07-22 04:55:12+00:00
libnetwork/networkdb/networkdb_test.go
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) 
assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 for i := 1; i <= n; i++ { err = 
dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 
5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) // Single node Join/Leave err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 1 { t.Fatalf("The networkNodes list has to have be 1 instead of %d", len(dbs[0].networkNodes["network1"])) } err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) if len(dbs[0].networkNodes["network1"]) != 0 { t.Fatalf("The networkNodes list has to have be 0 instead of %d", len(dbs[0].networkNodes["network1"])) } // Multiple nodes Join/Leave err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) if len(dbs[0].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead of %d - %v", len(dbs[0].networkNodes["network1"]), dbs[0].networkNodes["network1"]) } dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) if len(dbs[1].networkNodes["network1"]) != 2 { t.Fatalf("The networkNodes list has to have be 2 instead 
of %d - %v", len(dbs[1].networkNodes["network1"]), dbs[1].networkNodes["network1"]) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: 
memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: 
net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
package networkdb import ( "fmt" "log" "net" "os" "strconv" "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" "github.com/hashicorp/serf/serf" "github.com/sirupsen/logrus" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/poll" ) var dbPort int32 = 10000 func TestMain(m *testing.M) { os.WriteFile("/proc/sys/net/ipv6/conf/lo/disable_ipv6", []byte{'0', '\n'}, 0644) logrus.SetLevel(logrus.ErrorLevel) os.Exit(m.Run()) } func launchNode(t *testing.T, conf Config) *NetworkDB { t.Helper() db, err := New(&conf) assert.NilError(t, err) return db } func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB { t.Helper() var dbs []*NetworkDB for i := 0; i < num; i++ { localConfig := *conf localConfig.Hostname = fmt.Sprintf("%s%d", namePrefix, i+1) localConfig.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) localConfig.BindPort = int(atomic.AddInt32(&dbPort, 1)) db := launchNode(t, localConfig) if i != 0 { assert.Check(t, db.Join([]string{fmt.Sprintf("localhost:%d", db.config.BindPort-1)})) } dbs = append(dbs, db) } // Wait till the cluster creation is successful check := func(t poll.LogT) poll.Result { // Check that the cluster is properly created for i := 0; i < num; i++ { if num != len(dbs[i].ClusterPeers()) { return poll.Continue("%s:Waiting for cluster peers to be established", dbs[i].config.Hostname) } } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second)) return dbs } func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() log.Print("Closing DB instances...") for _, db := range dbs { db.Close() } } func (db *NetworkDB) verifyNodeExistence(t *testing.T, node string, present bool) { t.Helper() for i := 0; i < 80; i++ { db.RLock() _, ok := db.nodes[node] db.RUnlock() if present && ok { return } if !present && !ok { return } time.Sleep(50 * time.Millisecond) } t.Errorf("%v(%v): Node existence verification for node %s failed", db.config.Hostname, db.config.NodeID, node) } func (db *NetworkDB) verifyNetworkExistence(t *testing.T, node string, id string, present bool) { t.Helper() const sleepInterval = 50 * time.Millisecond var maxRetries int64 if dl, ok := t.Deadline(); ok { maxRetries = int64(time.Until(dl) / sleepInterval) } else { maxRetries = 80 } for i := int64(0); i < maxRetries; i++ { db.RLock() nn, nnok := db.networks[node] db.RUnlock() if nnok { n, ok := nn[id] if present && ok { return } if !present && ((ok && n.leaving) || !ok) { return } } time.Sleep(sleepInterval) } t.Error("Network existence verification failed") } func (db *NetworkDB) verifyEntryExistence(t *testing.T, tname, nid, key, value string, present bool) { t.Helper() n := 80 for i := 0; i < n; i++ { entry, err := db.getEntry(tname, nid, key) if present && err == nil && string(entry.value) == value { return } if !present && ((err == nil && entry.deleting) || (err != nil)) { return } if i == n-1 && !present && err != nil { return } time.Sleep(50 * time.Millisecond) } t.Errorf("Entry existence verification test failed for %v(%v)", db.config.Hostname, db.config.NodeID) } func testWatch(t *testing.T, ch chan events.Event, ev interface{}, tname, nid, key, value string) { t.Helper() select { case rcvdEv := <-ch: assert.Check(t, is.Equal(fmt.Sprintf("%T", rcvdEv), fmt.Sprintf("%T", ev))) switch typ := rcvdEv.(type) { case CreateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, 
is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case UpdateEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) assert.Check(t, is.Equal(value, string(typ.Value))) case DeleteEvent: assert.Check(t, is.Equal(tname, typ.Table)) assert.Check(t, is.Equal(nid, typ.NetworkID)) assert.Check(t, is.Equal(key, typ.Key)) } case <-time.After(time.Second): t.Fail() return } } func TestNetworkDBSimple(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetwork(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBJoinLeaveNetworks(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) n := 10 for i := 1; i <= n; i++ { err := dbs[0].JoinNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].JoinNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), true) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), true) } for i := 1; i <= n; i++ { err := dbs[0].LeaveNetwork(fmt.Sprintf("network0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err := dbs[1].LeaveNetwork(fmt.Sprintf("network1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, fmt.Sprintf("network0%d", i), false) } for i := 1; i <= n; i++ { dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, fmt.Sprintf("network1%d", i), false) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntry(t *testing.T) { dbs := createNetworkDBInstances(t, 3, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[2].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) closeNetworkDBInstances(t, dbs) } func TestNetworkDBCRUDTableEntries(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) n := 10 
for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].CreateEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i), []byte(fmt.Sprintf("test_value1%d", i))) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), fmt.Sprintf("test_value1%d", i), true) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } // Verify deletes for i := 1; i <= n; i++ { err = dbs[0].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { err = dbs[1].DeleteEntry("test_table", "network1", fmt.Sprintf("test_key1%d", i)) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[0].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key1%d", i), "", false) assert.NilError(t, err) } for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), "", false) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeLeave(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) dbs[0].Close() dbs[1].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", false) dbs[1].Close() } func TestNetworkDBWatch(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) ch, cancel := dbs[1].Watch("", "", "") err = dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) testWatch(t, ch.C, CreateEvent{}, "test_table", "network1", "test_key", "test_value") err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) testWatch(t, ch.C, UpdateEvent{}, "test_table", "network1", "test_key", "test_updated_value") err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) testWatch(t, ch.C, DeleteEvent{}, "test_table", "network1", "test_key", "") cancel() closeNetworkDBInstances(t, dbs) } func TestNetworkDBBulkSync(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) n := 1000 for i := 1; i <= n; i++ { err = dbs[0].CreateEntry("test_table", "network1", fmt.Sprintf("test_key0%d", i), []byte(fmt.Sprintf("test_value0%d", i))) assert.NilError(t, err) } err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) for i := 1; i <= n; i++ { dbs[1].verifyEntryExistence(t, "test_table", "network1", fmt.Sprintf("test_key0%d", i), fmt.Sprintf("test_value0%d", i), true) assert.NilError(t, err) } closeNetworkDBInstances(t, dbs) } func 
TestNetworkDBCRUDMediumCluster(t *testing.T) { n := 5 dbs := createNetworkDBInstances(t, n, "node", DefaultConfig()) for i := 0; i < n; i++ { for j := 0; j < n; j++ { if i == j { continue } dbs[i].verifyNodeExistence(t, dbs[j].config.NodeID, true) } } for i := 0; i < n; i++ { err := dbs[i].JoinNetwork("network1") assert.NilError(t, err) } for i := 0; i < n; i++ { for j := 0; j < n; j++ { dbs[i].verifyNetworkExistence(t, dbs[j].config.NodeID, "network1", true) } } err := dbs[0].CreateEntry("test_table", "network1", "test_key", []byte("test_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_value", true) } err = dbs[0].UpdateEntry("test_table", "network1", "test_key", []byte("test_updated_value")) assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "test_updated_value", true) } err = dbs[0].DeleteEntry("test_table", "network1", "test_key") assert.NilError(t, err) for i := 1; i < n; i++ { dbs[i].verifyEntryExistence(t, "test_table", "network1", "test_key", "", false) } for i := 1; i < n; i++ { _, err = dbs[i].GetEntry("test_table", "network1", "test_key") assert.Check(t, is.ErrorContains(err, "")) assert.Check(t, is.Contains(err.Error(), "deleted and pending garbage collection"), err) } closeNetworkDBInstances(t, dbs) } func TestNetworkDBNodeJoinLeaveIteration(t *testing.T) { dbs := createNetworkDBInstances(t, 2, "node", DefaultConfig()) var ( dbIndex int32 staleNetworkTime [2]serf.LamportTime expectNodeCount int network = "network1" ) dbChangeWitness := func(t poll.LogT) poll.Result { db := dbs[dbIndex] networkTime := db.networkClock.Time() if networkTime <= staleNetworkTime[dbIndex] { return poll.Continue("network time is stale, no change registered yet.") } count := -1 db.Lock() if nodes, ok := db.networkNodes[network]; ok { count = len(nodes) } db.Unlock() if count != expectNodeCount { return poll.Continue("current number of nodes is %d, expect %d.", count, expectNodeCount) } return poll.Success() } // Single node Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 1 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) dbIndex, expectNodeCount = 0, 0 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) // Multiple nodes Join/Leave staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) // Wait for the propagation on db[0] dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[0].networks[dbs[0].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Wait for the propagation on db[1] dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), 
poll.WithDelay(5*time.Millisecond)) if n, ok := dbs[1].networks[dbs[1].config.NodeID]["network1"]; !ok || n.leaving { t.Fatalf("The network should not be marked as leaving:%t", n.leaving) } // Try a quick leave/join staleNetworkTime[0], staleNetworkTime[1] = dbs[0].networkClock.Time(), dbs[1].networkClock.Time() err = dbs[0].LeaveNetwork("network1") assert.NilError(t, err) err = dbs[0].JoinNetwork("network1") assert.NilError(t, err) dbs[0].verifyNetworkExistence(t, dbs[1].config.NodeID, "network1", true) dbIndex, expectNodeCount = 0, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) dbs[1].verifyNetworkExistence(t, dbs[0].config.NodeID, "network1", true) dbIndex, expectNodeCount = 1, 2 poll.WaitOn(t, dbChangeWitness, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBGarbageCollection(t *testing.T) { keysWriteDelete := 5 config := DefaultConfig() config.reapEntryInterval = 30 * time.Second config.StatsPrintPeriod = 15 * time.Second dbs := createNetworkDBInstances(t, 3, "node", config) // 2 Nodes join network err := dbs[0].JoinNetwork("network1") assert.NilError(t, err) err = dbs[1].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].CreateEntry("testTable", "network1", "key-"+strconv.Itoa(i), []byte("value")) assert.NilError(t, err) } time.Sleep(time.Second) for i := 0; i < keysWriteDelete; i++ { err = dbs[i%2].DeleteEntry("testTable", "network1", "key-"+strconv.Itoa(i)) assert.NilError(t, err) } for i := 0; i < 2; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // from this point the timer for the garbage collection started, wait 5 seconds and then join a new node time.Sleep(5 * time.Second) err = dbs[2].JoinNetwork("network1") assert.NilError(t, err) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(keysWriteDelete, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries number should match") } // at this point the entries should had been all deleted time.Sleep(30 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } // make sure that entries are not coming back time.Sleep(15 * time.Second) for i := 0; i < 3; i++ { assert.Check(t, is.Equal(0, dbs[i].networks[dbs[i].config.NodeID]["network1"].entriesNumber), "entries should had been garbage collected") } closeNetworkDBInstances(t, dbs) } func TestFindNode(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["active"] = &node{Node: memberlist.Node{Name: "active"}} dbs[0].failedNodes["failed"] = &node{Node: memberlist.Node{Name: "failed"}} dbs[0].leftNodes["left"] = &node{Node: memberlist.Node{Name: "left"}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, is.Len(dbs[0].leftNodes, 1)) n, currState, m := dbs[0].findNode("active") assert.Check(t, n != nil) assert.Check(t, is.Equal("active", n.Name)) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, m != nil) // delete the entry manually delete(m, "active") // test if can be still find n, currState, m = dbs[0].findNode("active") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) 
assert.Check(t, is.Nil(m)) n, currState, m = dbs[0].findNode("failed") assert.Check(t, n != nil) assert.Check(t, is.Equal("failed", n.Name)) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, m != nil) // find and remove n, currState, m = dbs[0].findNode("left") assert.Check(t, n != nil) assert.Check(t, is.Equal("left", n.Name)) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, m != nil) delete(m, "left") n, currState, m = dbs[0].findNode("left") assert.Check(t, is.Nil(n)) assert.Check(t, is.Equal(nodeNotFound, currState)) assert.Check(t, is.Nil(m)) closeNetworkDBInstances(t, dbs) } func TestChangeNodeState(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1"}} dbs[0].nodes["node2"] = &node{Node: memberlist.Node{Name: "node2"}} dbs[0].nodes["node3"] = &node{Node: memberlist.Node{Name: "node3"}} // active nodes is 4 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) n, currState, m := dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) // node1 to failed dbs[0].changeNodeState("node1", nodeFailedState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeFailedState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // node1 back to active dbs[0].changeNodeState("node1", nodeActiveState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeActiveState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, is.Equal(time.Duration(0), n.reapTime)) // node1 to left dbs[0].changeNodeState("node1", nodeLeftState) dbs[0].changeNodeState("node2", nodeLeftState) dbs[0].changeNodeState("node3", nodeLeftState) n, currState, m = dbs[0].findNode("node1") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node1", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node2") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node2", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) n, currState, m = dbs[0].findNode("node3") assert.Check(t, n != nil) assert.Check(t, is.Equal(nodeLeftState, currState)) assert.Check(t, is.Equal("node3", n.Name)) assert.Check(t, m != nil) assert.Check(t, time.Duration(0) != n.reapTime) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 1)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestNodeReincarnation(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) dbs[0].nodes["node1"] = &node{Node: memberlist.Node{Name: "node1", Addr: net.ParseIP("192.168.1.1")}} dbs[0].leftNodes["node2"] = &node{Node: memberlist.Node{Name: "node2", Addr: net.ParseIP("192.168.1.2")}} dbs[0].failedNodes["node3"] = &node{Node: memberlist.Node{Name: "node3", Addr: net.ParseIP("192.168.1.3")}} // active nodes is 2 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 2)) assert.Check(t, is.Len(dbs[0].failedNodes, 1)) assert.Check(t, 
is.Len(dbs[0].leftNodes, 1)) b := dbs[0].purgeReincarnation(&memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}) assert.Check(t, b) dbs[0].nodes["node4"] = &node{Node: memberlist.Node{Name: "node4", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.2")}) assert.Check(t, b) dbs[0].nodes["node5"] = &node{Node: memberlist.Node{Name: "node5", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.3")}) assert.Check(t, b) dbs[0].nodes["node6"] = &node{Node: memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.1")}} b = dbs[0].purgeReincarnation(&memberlist.Node{Name: "node6", Addr: net.ParseIP("192.168.1.10")}) assert.Check(t, !b) // active nodes is 1 because the testing node is in the list assert.Check(t, is.Len(dbs[0].nodes, 4)) assert.Check(t, is.Len(dbs[0].failedNodes, 0)) assert.Check(t, is.Len(dbs[0].leftNodes, 3)) closeNetworkDBInstances(t, dbs) } func TestParallelCreate(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestParallelDelete(t *testing.T) { dbs := createNetworkDBInstances(t, 1, "node", DefaultConfig()) err := dbs[0].CreateEntry("testTable", "testNetwork", "key", []byte("value")) assert.NilError(t, err) startCh := make(chan int) doneCh := make(chan error) var success int32 for i := 0; i < 20; i++ { go func() { <-startCh err := dbs[0].DeleteEntry("testTable", "testNetwork", "key") if err == nil { atomic.AddInt32(&success, 1) } doneCh <- err }() } close(startCh) for i := 0; i < 20; i++ { <-doneCh } close(doneCh) // Only 1 write should have succeeded assert.Check(t, is.Equal(int32(1), success)) closeNetworkDBInstances(t, dbs) } func TestNetworkDBIslands(t *testing.T) { pollTimeout := func() time.Duration { const defaultTimeout = 120 * time.Second dl, ok := t.Deadline() if !ok { return defaultTimeout } if d := time.Until(dl); d <= defaultTimeout { return d } return defaultTimeout } logrus.SetLevel(logrus.DebugLevel) conf := DefaultConfig() // Shorten durations to speed up test execution. 
conf.rejoinClusterDuration = conf.rejoinClusterDuration / 10 conf.rejoinClusterInterval = conf.rejoinClusterInterval / 10 dbs := createNetworkDBInstances(t, 5, "node", conf) // Get the node IP used currently node := dbs[0].nodes[dbs[0].config.NodeID] baseIPStr := node.Addr.String() // Node 0,1,2 are going to be the 3 bootstrap nodes members := []string{fmt.Sprintf("%s:%d", baseIPStr, dbs[0].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[1].config.BindPort), fmt.Sprintf("%s:%d", baseIPStr, dbs[2].config.BindPort)} // Rejoining will update the list of the bootstrap members for i := 3; i < 5; i++ { t.Logf("Re-joining: %d", i) assert.Check(t, dbs[i].Join(members)) } // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { logrus.Infof("node %d leaving", i) dbs[i].Close() } checkDBs := make(map[string]*NetworkDB) for i := 3; i < 5; i++ { db := dbs[i] checkDBs[db.config.Hostname] = db } // Give some time to let the system propagate the messages and free up the ports check := func(t poll.LogT) poll.Result { // Verify that the nodes are actually all gone and marked appropiately for name, db := range checkDBs { db.RLock() if (len(db.leftNodes) != 3) || (len(db.failedNodes) != 0) { for name := range db.leftNodes { t.Logf("%s: Node %s left", db.config.Hostname, name) } for name := range db.failedNodes { t.Logf("%s: Node %s failed", db.config.Hostname, name) } db.RUnlock() return poll.Continue("%s:Waiting for all nodes to cleanly leave, left: %d, failed nodes: %d", name, len(db.leftNodes), len(db.failedNodes)) } db.RUnlock() t.Logf("%s: OK", name) delete(checkDBs, name) } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { logrus.Infof("node %d coming back", i) dbs[i].config.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, *dbs[i].config) } // Give some time for the reconnect routine to run, it runs every 6s. check = func(t poll.LogT) poll.Result { // Verify that the cluster is again all connected. Note that the 3 previous node did not do any join for i := 0; i < 5; i++ { db := dbs[i] db.RLock() if len(db.nodes) != 5 { db.RUnlock() return poll.Continue("%s:Waiting to connect to all nodes", dbs[i].config.Hostname) } if len(db.failedNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting for 0 failedNodes", dbs[i].config.Hostname) } if i < 3 { // nodes from 0 to 3 has no left nodes if len(db.leftNodes) != 0 { db.RUnlock() return poll.Continue("%s:Waiting to have no leftNodes", dbs[i].config.Hostname) } } else { // nodes from 4 to 5 has the 3 previous left nodes if len(db.leftNodes) != 3 { db.RUnlock() return poll.Continue("%s:Waiting to have 3 leftNodes", dbs[i].config.Hostname) } } db.RUnlock() } return poll.Success() } poll.WaitOn(t, check, poll.WithDelay(time.Second), poll.WithTimeout(pollTimeout())) closeNetworkDBInstances(t, dbs) }
zq-david-wang
d515863abcf456dfd5a1c979a3432ba6763dbad3
49f021ebf00a76d74f5ce158244083e2dfba26fb
This can be addressed in a follow-up PR since it's all private to the one test
corhere
4,476
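The test code in the record above relies on gotest.tools' poll helpers: a check closure captures shared state and poll.WaitOn retries it until it reports success or the timeout expires, and this test-private machinery is what the review comment suggests could be refactored in a follow-up PR. The following is a minimal, hypothetical sketch of that polling pattern, kept separate from the dataset rows; the package name, test name, and counter are illustrative assumptions, and the gotest.tools/v3/poll import path is assumed to match the module version the repository uses.

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"gotest.tools/v3/poll"
)

// TestEventuallyReachesExpectedCount demonstrates the poll.WaitOn pattern:
// the check closure captures shared state, returns poll.Continue while the
// condition does not hold yet, and poll.Success once it does.
func TestEventuallyReachesExpectedCount(t *testing.T) {
	var counter int64
	go func() {
		for i := 0; i < 3; i++ {
			time.Sleep(10 * time.Millisecond)
			atomic.AddInt64(&counter, 1)
		}
	}()

	check := func(t poll.LogT) poll.Result {
		if n := atomic.LoadInt64(&counter); n != 3 {
			return poll.Continue("current count is %d, expect 3", n)
		}
		return poll.Success()
	}
	poll.WaitOn(t, check, poll.WithTimeout(3*time.Second), poll.WithDelay(5*time.Millisecond))
}

A useful property of this shape is that the message passed to poll.Continue explains exactly what the test was still waiting for if the timeout fires.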
moby/moby
42,791
cmd/dockerd: add the link of "the documentation"
Add a link to the documentation in the `dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation` error message, for ease of reference
null
2021-08-27 05:09:43+00:00
2021-08-27 13:50:15+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
AkihiroSuda
2b70006e3bfa492b8641ff443493983d832955f4
c4040417b6fe21911dc7ab5e57db27519dd44a6a
nit; can be ", see https://.." now
tonistiigi
4,477
moby/moby
42,791
cmd/dockerd: add a link to "the documentation"
Add a link to the `dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation` error message, for ease of reference (see the sketch after this record).
null
2021-08-27 05:09:43+00:00
2021-08-27 13:50:15+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
AkihiroSuda
2b70006e3bfa492b8641ff443493983d832955f4
c4040417b6fe21911dc7ab5e57db27519dd44a6a
done
AkihiroSuda
4,478
moby/moby
42,791
cmd/dockerd: add a link to "the documentation"
Add a link to the `dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation` error message, for ease of reference.
null
2021-08-27 05:09:43+00:00
2021-08-27 13:50:15+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
AkihiroSuda
2b70006e3bfa492b8641ff443493983d832955f4
c4040417b6fe21911dc7ab5e57db27519dd44a6a
While we're changing this: the sentence contains `see` twice. Perhaps we should fix that as well. How about:
```suggestion
return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/")
```
We could decide to wrap the error to multiple lines (also making it easier to copy/paste the URL if needed):
```
The dockerd daemon needs to be started with root privileges.
To run dockerd in rootless mode as an unprivileged user,
refer to the documentation: https://docs.docker.com/go/rootless/
```
thaJeztah
4,479
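For illustration only, here is a minimal Go sketch of the multi-line error wording floated in the review comment above. The helper name `checkRootPrivileges` and the standalone program structure are assumptions made for a self-contained example; the message text comes from the comment itself, and the merged after_content above kept the single-line wording instead.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"runtime"
)

// checkRootPrivileges mirrors the euid check in cmd/dockerd/daemon.go, but
// returns the multi-line error message suggested in the review comment.
// The function name is hypothetical; only the message text is taken from
// the comment above.
func checkRootPrivileges() error {
	if runtime.GOOS == "linux" && os.Geteuid() != 0 {
		return errors.New(`dockerd needs to be started with root privileges.
To run dockerd in rootless mode as an unprivileged user,
refer to the documentation: https://docs.docker.com/go/rootless/`)
	}
	return nil
}

func main() {
	if err := checkRootPrivileges(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

A multi-line message like this keeps the URL on its own line, which makes it easier to copy and paste from a terminal, at the cost of a slightly noisier single-line log entry.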
moby/moby
42,791
cmd/dockerd: add the link of "the documentation"
Add a link to the `dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation` error message, for ease of reference.
null
2021-08-27 05:09:43+00:00
2021-08-27 13:50:15+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
AkihiroSuda
2b70006e3bfa492b8641ff443493983d832955f4
c4040417b6fe21911dc7ab5e57db27519dd44a6a
Updated the text. I'm not sure how we could have multi-line errors. (Maybe we don't want to embed "\n" directly into error strings?)
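A minimal sketch (not part of any record in this file) of the point raised in the comment above: once an error is wrapped or logged on a single line, an embedded "\n" splits the combined message awkwardly, which is one reason single-line error strings are generally preferred in Go. The example strings below are illustrative only.

```go
package main

import "fmt"

func main() {
	// Hypothetical multi-line error string with an embedded "\n".
	multiLine := fmt.Errorf("dockerd needs to be started with root privileges.\nsee the documentation for rootless mode")
	// Equivalent single-line form.
	singleLine := fmt.Errorf("dockerd needs to be started with root privileges; see the documentation for rootless mode")

	// Wrapping shows the difference: the first message breaks across lines mid-sentence.
	fmt.Println(fmt.Errorf("failed to start daemon: %w", multiLine))
	fmt.Println(fmt.Errorf("failed to start daemon: %w", singleLine))
}
```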
AkihiroSuda
4,480
moby/moby
42,791
cmd/dockerd: add a link to "the documentation"
Add a link to the `dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation` error message for ease of reference
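For reference, the single statement that changes in this PR, excerpted from the before_content and after_content recorded below (the surrounding startup code is unchanged):

```go
// Before:
return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")

// After:
return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/")
```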
null
2021-08-27 05:09:43+00:00
2021-08-27 13:50:15+00:00
cmd/dockerd/daemon.go
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
package main import ( "context" "crypto/tls" "fmt" "net" "os" "path/filepath" "runtime" "strings" "time" containerddefaults "github.com/containerd/containerd/defaults" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" distributionrouter "github.com/docker/docker/api/server/router/distribution" grpcrouter "github.com/docker/docker/api/server/router/grpc" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" pluginrouter "github.com/docker/docker/api/server/router/plugin" sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/cli/debug" "github.com/docker/docker/cmd/dockerd/trap" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd/supervisor" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin" "github.com/docker/docker/rootless" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/tlsconfig" swarmapi "github.com/docker/swarmkit/api" "github.com/moby/buildkit/session" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *config.Config configFile *string flags *pflag.FlagSet api *apiserver.Server d *daemon.Daemon authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins } // NewDaemonCli returns a daemon CLI func NewDaemonCli() *DaemonCli { return &DaemonCli{} } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } if opts.Validate { // If config wasn't OK we wouldn't have made it this far. fmt.Fprintln(os.Stderr, "configuration OK") return nil } warnOnDeprecatedConfigOptions(cli.Config) if err := configureDaemonLogs(cli.Config); err != nil { return err } logrus.Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags if cli.Config.Debug { debug.Enable() } if cli.Config.Experimental { logrus.Warn("Running experimental build") } if cli.Config.IsRootless() { logrus.Warn("Running in rootless mode. 
This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { logrus.Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } } // return human-friendly error before creating files if runtime.GOOS == "linux" && os.Geteuid() != 0 { return fmt.Errorf("dockerd needs to be started with root privileges. To run dockerd in rootless mode as an unprivileged user, see https://docs.docker.com/go/rootless/") } if err := setDefaultUmask(); err != nil { return err } // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { return err } if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil { return err } potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot} if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { return errors.Wrap(err, "failed to start daemon") } potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := pf.Remove(); err != nil { logrus.Error(err) } }() } if cli.Config.IsRootless() { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } serverConfig, err := newAPIServerConfig(cli) if err != nil { return errors.Wrap(err, "failed to create API server") } cli.api = apiserver.New(serverConfig) hosts, err := loadListeners(cli, serverConfig) if err != nil { return errors.Wrap(err, "failed to load listeners") } ctx, cancel := context.WithCancel(context.Background()) waitForContainerDShutdown, err := cli.initContainerD(ctx) if waitForContainerDShutdown != nil { defer waitForContainerDShutdown(10 * time.Second) } if err != nil { cancel() return err } defer cancel() stopc := make(chan bool) defer close(stopc) trap.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return }, logrus.StandardLogger()) // Notify that the API is active, but before daemon is set up. preNotifyReady() pluginStore := plugin.NewStore() if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { logrus.Fatalf("Error creating middlewares: %v", err) } d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore) if err != nil { return errors.Wrap(err, "failed to start daemon") } d.StoreHosts(hosts) // validate after NewDaemon has restored enabled plugins. Don't change order. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { return errors.Wrap(err, "failed to validate authorization plugin") } cli.d = d if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { return errors.Wrap(err, "failed to start metrics server") } c, err := createAndStartCluster(cli, d) if err != nil { logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint // and is not yet running now that we have successfully // initialized the cluster. 
d.RestartSwarmContainers() logrus.Info("Daemon has completed initialization") routerOptions, err := newRouterOptions(cli.Config, d) if err != nil { return err } routerOptions.api = cli.api routerOptions.cluster = c initRouter(routerOptions) go d.ProcessClusterNotifications(ctx, c.GetWatchStream()) cli.setupConfigReloadTrap() // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifyReady() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait c.Cleanup() // notify systemd that we're shutting down notifyStopping() shutdownDaemon(d) // Stop notification processing and any background processes cancel() if errAPI != nil { return errors.Wrap(errAPI, "shutting down due to ServeAPI error") } logrus.Info("Daemon shutdown complete") return nil } type routerOptions struct { sessionManager *session.Manager buildBackend *buildbackend.Backend features *map[string]bool buildkit *buildkit.Builder daemon *daemon.Daemon api *apiserver.Server cluster *cluster.Cluster } func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) { opts := routerOptions{} sm, err := session.NewManager() if err != nil { return opts, errors.Wrap(err, "failed to create sessionmanager") } manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping()) if err != nil { return opts, err } cgroupParent := newCgroupParent(config) bk, err := buildkit.New(buildkit.Opt{ SessionManager: sm, Root: filepath.Join(config.Root, "buildkit"), Dist: d.DistributionServices(), NetworkController: d.NetworkController(), DefaultCgroupParent: cgroupParent, RegistryHosts: d.RegistryHosts(), BuilderConfig: config.Builder, Rootless: d.Rootless(), IdentityMapping: d.IdentityMapping(), DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), }) if err != nil { return opts, err } bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) if err != nil { return opts, errors.Wrap(err, "failed to create buildmanager") } return routerOptions{ sessionManager: sm, buildBackend: bb, buildkit: bk, features: d.Features(), daemon: d, }, nil } func (cli *DaemonCli) reloadConfig() { reload := func(c *config.Config) { // Revalidate and reload the authorization plugins if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } if c.IsValueSet("debug") { debugEnabled := debug.IsEnabled() switch { case debugEnabled && !c.Debug: // disable debug debug.Disable() case c.Debug && !debugEnabled: // enable debug debug.Enable() } } } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } func (cli *DaemonCli) stop() { cli.api.Close() } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() if shutdownTimeout < 0 { <-ch logrus.Debug("Clean shutdown succeeded") return } timeout 
:= time.NewTimer(time.Duration(shutdownTimeout) * time.Second) defer timeout.Stop() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-timeout.C: logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { conf := opts.daemonConfig flags := opts.flags conf.Debug = opts.Debug conf.Hosts = opts.Hosts conf.LogLevel = opts.LogLevel if opts.flags.Changed(FlagTLS) { conf.TLS = &opts.TLS } if opts.flags.Changed(FlagTLSVerify) { conf.TLSVerify = &opts.TLSVerify v := true conf.TLS = &v } conf.CommonTLSOptions = config.CommonTLSOptions{} if opts.TLSOptions != nil { conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile } if conf.TrustKeyPath == "" { daemonConfDir, err := getDaemonConfDir(conf.Root) if err != nil { return nil, err } conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile) } if flags.Changed("graph") && flags.Changed("data-root") { return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { conf = c } } if err := config.Validate(conf); err != nil { return nil, err } if flags.Changed("graph") { logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) } // Check if duplicate label-keys with different values are found newLabels, err := config.GetConflictFreeLabels(conf.Labels) if err != nil { return nil, err } conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if conf.IsValueSet(FlagTLSVerify) { v := true conf.TLS = &v } if conf.TLSVerify == nil && conf.TLS != nil { conf.TLSVerify = conf.TLS } return conf, nil } func warnOnDeprecatedConfigOptions(config *config.Config) { if config.ClusterAdvertise != "" { logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`) } if config.ClusterStore != "" { logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`) } if len(config.ClusterOpts) > 0 { logrus.Warn(`The "cluster-store-opt" option is deprecated. 
To be removed soon.`) } } func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{ GetSysInfo: func() *sysinfo.SysInfo { return opts.daemon.RawSysInfo() }, } routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked checkpointrouter.NewRouter(opts.daemon, decoder), container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter(opts.daemon.ImageService()), systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features), volume.NewRouter(opts.daemon.VolumesService()), build.NewRouter(opts.buildBackend, opts.daemon, opts.features), sessionrouter.NewRouter(opts.sessionManager), swarmrouter.NewRouter(opts.cluster), pluginrouter.NewRouter(opts.daemon.PluginManager()), distributionrouter.NewRouter(opts.daemon.ImageService()), } grpcBackends := []grpcrouter.Backend{} for _, b := range []interface{}{opts.daemon, opts.buildBackend} { if b, ok := b.(grpcrouter.Backend); ok { grpcBackends = append(grpcBackends, b) } } if len(grpcBackends) > 0 { routers = append(routers, grpcrouter.NewRouter(grpcBackends...)) } if opts.daemon.NetworkControllerEnabled() { routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } if opts.daemon.HasExperimental() { for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { experimental.Enable() } } } } opts.api.InitRouter(routers...) } // TODO: remove this from cli and return the authzMiddleware func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) { opts, err := cli.getPlatformContainerdDaemonOpts() if err != nil { return nil, err } if cli.Config.Debug { opts = append(opts, supervisor.WithLogLevel("debug")) } else if cli.Config.LogLevel != "" { opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel)) } if !cli.Config.CriContainerd { opts = append(opts, supervisor.WithPlugin("cri", nil)) } return opts, nil } func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { serverConfig := &apiserver.Config{ Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, CorsHeaders: cli.Config.CorsHeaders, } if cli.Config.TLS != nil && *cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, ExclusiveRootPools: true, } if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { return nil, err } serverConfig.TLSConfig = tlsConfig } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } return serverConfig, nil } // checkTLSAuthOK checks basically for an explicitly disabled 
TLS/TLSVerify // Going forward we do not want to support a scenario where dockerd listens // on TCP without either TLS client auth (or an explicit opt-in to disable it) func checkTLSAuthOK(c *config.Config) bool { if c.TLS == nil { // Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled // Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed return DefaultTLSValue } if !*c.TLS { // TLS is explicitly disabled, which is supported return true } if c.TLSVerify == nil { // this actually shouldn't happen since we set TLSVerify on the config object anyway // But in case it does get here, be cautious and assume this is not supported. return false } // Either TLSVerify is explicitly enabled or disabled, both cases are supported return true } func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { var hosts []string seen := make(map[string]struct{}, len(cli.Config.Hosts)) useTLS := DefaultTLSValue if cli.Config.TLS != nil { useTLS = *cli.Config.TLS } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil { return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i]) } if _, ok := seen[cli.Config.Hosts[i]]; ok { continue } seen[cli.Config.Hosts[i]] = struct{}{} protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) } proto := protoAddrParts[0] addr := protoAddrParts[1] // It's a bad idea to bind to TCP without tlsverify. authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" // We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support if !checkTLSAuthOK(cli.Config) { ipAddr, _, err := net.SplitHostPort(addr) if err != nil { return nil, errors.Wrap(err, "error parsing tcp address") } // shortcut all this extra stuff for literal "localhost" // -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here. if ipAddr != "localhost" { ip := net.ParseIP(ipAddr) if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } } } } ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return nil, err } // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { return nil, err } } logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, protoAddrParts[1]) cli.api.Accept(addr, ls...) } return hosts, nil } func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { name, _ := os.Hostname() // Use a buffered channel to pass changes from store watch API to daemon // A buffer allows store watch API and daemon processing to not wait for each other watchStream := make(chan *swarmapi.WatchMessage, 32) c, err := cluster.New(cluster.Config{ Root: cli.Config.Root, Name: name, Backend: d, VolumeBackend: d.VolumesService(), ImageBackend: d.ImageService(), PluginBackend: d.PluginManager(), NetworkSubnetsProvider: d, DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, RaftElectionTick: cli.Config.SwarmRaftElectionTick, RuntimeRoot: cli.getSwarmRunRoot(), WatchStream: watchStream, }) if err != nil { return nil, err } d.SetCluster(c) err = c.Start() return c, err } // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { return err } } return nil } func systemContainerdRunning(honorXDG bool) (string, bool, error) { addr := containerddefaults.DefaultAddress if honorXDG { runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", false, err } addr = filepath.Join(runtimeDir, "containerd", "containerd.sock") } _, err := os.Lstat(addr) return addr, err == nil, nil } // configureDaemonLogs sets the logrus logging level and formatting func configureDaemonLogs(conf *config.Config) error { if conf.LogLevel != "" { lvl, err := logrus.ParseLevel(conf.LogLevel) if err != nil { return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } logrus.SetFormatter(&logrus.TextFormatter{ TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: conf.RawLogs, FullTimestamp: true, }) return nil }
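The TLS-handling code above encodes a small decision table in `checkTLSAuthOK` (together with `DefaultTLSValue`) that is easy to lose track of when reading the surrounding warnings in `loadListeners`. The following is a hypothetical, table-driven sketch of the expected outcomes; `TestCheckTLSAuthOK`, `cfgWith`, and `boolPtr` are not part of the tree, and it assumes the test would sit in the same `main` package as `checkTLSAuthOK`, with `TLS`/`TLSVerify` reachable as promoted fields of `config.Config` as they are in the function itself.

```go
package main // sketch only: assumes the same package as checkTLSAuthOK in cmd/dockerd

import (
	"testing"

	"github.com/docker/docker/daemon/config"
)

// cfgWith is a hypothetical helper building a config with the given TLS flags.
func cfgWith(tls, verify *bool) *config.Config {
	c := &config.Config{}
	c.TLS = tls
	c.TLSVerify = verify
	return c
}

func boolPtr(b bool) *bool { return &b }

// TestCheckTLSAuthOK mirrors the comments inside checkTLSAuthOK above.
func TestCheckTLSAuthOK(t *testing.T) {
	cases := []struct {
		name string
		cfg  *config.Config
		want bool
	}{
		{"TLS unset: fall back to DefaultTLSValue", cfgWith(nil, nil), DefaultTLSValue},
		{"TLS explicitly disabled is supported", cfgWith(boolPtr(false), nil), true},
		{"TLS on but TLSVerify unset: be cautious", cfgWith(boolPtr(true), nil), false},
		{"TLS on and TLSVerify explicitly set", cfgWith(boolPtr(true), boolPtr(true)), true},
	}
	for _, tc := range cases {
		if got := checkTLSAuthOK(tc.cfg); got != tc.want {
			t.Errorf("%s: checkTLSAuthOK() = %v, want %v", tc.name, got, tc.want)
		}
	}
}
```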
AkihiroSuda
2b70006e3bfa492b8641ff443493983d832955f4
c4040417b6fe21911dc7ab5e57db27519dd44a6a
Doesn't it print errors as-is (including newlines)? (haven't tried) This error will always be local, so we could just print it here instead, of course. I was thinking we did something similar for the API exposed on non-TLS, but that was returned through the API; https://github.com/moby/moby/blob/7681a3eb40ab49341d4e88420dafc7f4944ac7b7/daemon/info.go#L197-L200 I'm fine with the current version though; I was just thinking it might make it slightly easier to read (and use).
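For context on the trade-off the comment weighs, here is a rough sketch of the second option it mentions: surfacing a condition to remote clients by appending to the `/info` response rather than only logging it locally. The helper name and the warning text are illustrative; the only assumption about the real API is the `Warnings` field on `types.Info`, which is the pattern in the linked daemon/info.go lines.

```go
package daemon // sketch only; helper name and message are illustrative

import "github.com/docker/docker/api/types"

// appendSecurityWarnings shows the "return it through the API" option: the
// warning is collected on the types.Info response so remote clients see it,
// instead of (or in addition to) logging it locally at startup.
func appendSecurityWarnings(v *types.Info, insecureAPI bool) {
	if insecureAPI {
		v.Warnings = append(v.Warnings, "WARNING: API is accessible on an unencrypted endpoint") // illustrative text
	}
}
```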
thaJeztah
4,481
moby/moby
42,785
Fixed docker.internal.gateway not displaying properly on live restore
fixes https://github.com/moby/moby/issues/42753 …erly <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** I fixed issue #42753, where the host-gateway IP (used for `host.docker.internal`) was not assigned to containers after a reboot when live-restore was enabled. This had to do with containers started after the docker daemon was restarted. **- How I did it** There was a function that assigned the host-gateway IP address. I made sure that function was also called inside the conditional that runs when there are active sandboxes. **- How to verify it** #42753 details the steps **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> This pull request removes the recurrence of #42753 by moving the HostGatewayIP assignment into a function and calling it once in the normal case and once when sandboxes (containers) are running during a daemon restart. The pull request is a WIP since I have yet to write a unit test; I would need your guidance for it. I am not sure how to call the API for the test, i.e. how to run docker run --add-host=.... busybox in script form and get the output. I know how to do it with fork/subprocess, but am not sure of the API approach to it. **- A picture of a cute animal (not mandatory but encouraged)** ![Not mine, but posting it here.](https://ichef.bbci.co.uk/news/976/cpsprodpb/3B4B/production/_109897151_otternew.jpg)
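A minimal sketch of the refactor the description proposes, not the merged patch: the helper name `setHostGatewayIP` is hypothetical, while the individual calls mirror the `initNetworkController` body shown later in this record's before-content (which sets `HostGatewayIP` only after the early `return controller, nil` taken when active sandboxes exist, which is the bug).

```go
package daemon // sketch only: assumes it sits next to initNetworkController

import (
	"net"

	"github.com/docker/docker/daemon/config"
	"github.com/docker/docker/libnetwork"
)

// setHostGatewayIP is a hypothetical helper: it defaults HostGatewayIP to the
// default bridge's gateway when none was configured explicitly, so the same
// logic can run both on a normal start and on the live-restore path that
// returns early when active sandboxes exist.
func setHostGatewayIP(controller libnetwork.NetworkController, conf *config.Config) {
	if conf.HostGatewayIP != nil || controller == nil {
		return // explicitly configured, or no controller to inspect
	}
	if n, err := controller.NetworkByName("bridge"); err == nil {
		v4Info, v6Info := n.Info().IpamInfo()
		var gateway net.IP
		if len(v4Info) > 0 {
			gateway = v4Info[0].Gateway.IP
		} else if len(v6Info) > 0 {
			gateway = v6Info[0].Gateway.IP
		}
		conf.HostGatewayIP = gateway
	}
}
```

Calling such a helper both before the active-sandboxes early return and on the normal path is what would keep `host-gateway` (and hence `--add-host=host.docker.internal:host-gateway`) resolvable after a daemon restart with live-restore enabled.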
null
2021-08-25 21:06:53+00:00
2021-11-16 04:26:21+00:00
daemon/daemon_unix.go
//go:build linux || freebsd // +build linux freebsd package daemon // import "github.com/docker/docker/daemon" import ( "bufio" "context" "fmt" "net" "os" "path/filepath" "runtime" "runtime/debug" "strconv" "strings" "sync" "time" "github.com/containerd/cgroups" statsV1 "github.com/containerd/cgroups/stats/v1" statsV2 "github.com/containerd/cgroups/v2/stats" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/blkiodev" pblkiodev "github.com/docker/docker/api/types/blkiodev" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/drivers/bridge" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/options" lntypes "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" volumemounts "github.com/docker/docker/volume/mounts" "github.com/moby/sys/mount" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) const ( isWindows = false // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 linuxMinCPUShares = 2 linuxMaxCPUShares = 262144 platformSupported = true // It's not kernel limit, we want this 6M limit to account for overhead during startup, and to supply a reasonable functional container linuxMinMemory = 6291456 // constants for remapped root settings defaultIDSpecifier = "default" defaultRemappedID = "dockremap" // constant for cgroup drivers cgroupFsDriver = "cgroupfs" cgroupSystemdDriver = "systemd" cgroupNoneDriver = "none" ) type containerGetter interface { GetContainer(string) (*container.Container, error) } func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { memory := specs.LinuxMemory{} if config.Memory > 0 { memory.Limit = &config.Memory } if config.MemoryReservation > 0 { memory.Reservation = &config.MemoryReservation } if config.MemorySwap > 0 { memory.Swap = &config.MemorySwap } if config.MemorySwappiness != nil { swappiness := uint64(*config.MemorySwappiness) memory.Swappiness = &swappiness } if config.OomKillDisable != nil { memory.DisableOOMKiller = config.OomKillDisable } if config.KernelMemory != 0 { memory.Kernel = &config.KernelMemory } if config.KernelMemoryTCP != 0 { memory.KernelTCP = &config.KernelMemoryTCP } return &memory } func getPidsLimit(config containertypes.Resources) *specs.LinuxPids { if config.PidsLimit == nil { return nil } if *config.PidsLimit <= 0 { // docker API allows 0 and negative values to unset this to be consistent // with default values. When updating values, runc requires -1 to unset // the previous limit. 
return &specs.LinuxPids{Limit: -1} } return &specs.LinuxPids{Limit: *config.PidsLimit} } func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { cpu := specs.LinuxCPU{} if config.CPUShares < 0 { return nil, fmt.Errorf("shares: invalid argument") } if config.CPUShares >= 0 { shares := uint64(config.CPUShares) cpu.Shares = &shares } if config.CpusetCpus != "" { cpu.Cpus = config.CpusetCpus } if config.CpusetMems != "" { cpu.Mems = config.CpusetMems } if config.NanoCPUs > 0 { // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt period := uint64(100 * time.Millisecond / time.Microsecond) quota := config.NanoCPUs * int64(period) / 1e9 cpu.Period = &period cpu.Quota = &quota } if config.CPUPeriod != 0 { period := uint64(config.CPUPeriod) cpu.Period = &period } if config.CPUQuota != 0 { q := config.CPUQuota cpu.Quota = &q } if config.CPURealtimePeriod != 0 { period := uint64(config.CPURealtimePeriod) cpu.RealtimePeriod = &period } if config.CPURealtimeRuntime != 0 { c := config.CPURealtimeRuntime cpu.RealtimeRuntime = &c } return &cpu, nil } func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { var stat unix.Stat_t var blkioWeightDevices []specs.LinuxWeightDevice for _, weightDevice := range config.BlkioWeightDevice { if err := unix.Stat(weightDevice.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: weightDevice.Path, Err: err}) } weight := weightDevice.Weight d := specs.LinuxWeightDevice{Weight: &weight} // The type is 32bit on mips. d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert blkioWeightDevices = append(blkioWeightDevices, d) } return blkioWeightDevices, nil } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { container.NoNewPrivileges = daemon.configStore.NoNewPrivileges return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { var ( labelOpts []string err error ) for _, opt := range config.SecurityOpt { if opt == "no-new-privileges" { container.NoNewPrivileges = true continue } if opt == "disable" { labelOpts = append(labelOpts, "disable") continue } var con []string if strings.Contains(opt, "=") { con = strings.SplitN(opt, "=", 2) } else if strings.Contains(opt, ":") { con = strings.SplitN(opt, ":", 2) logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") } if len(con) != 2 { return fmt.Errorf("invalid --security-opt 1: %q", opt) } switch con[0] { case "label": labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] case "seccomp": container.SeccompProfile = con[1] case "no-new-privileges": noNewPrivileges, err := strconv.ParseBool(con[1]) if err != nil { return fmt.Errorf("invalid --security-opt 2: %q", opt) } container.NoNewPrivileges = noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } } container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { var throttleDevices []specs.LinuxThrottleDevice var stat unix.Stat_t for _, d := range devs { if err := unix.Stat(d.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: d.Path, 
Err: err}) } d := specs.LinuxThrottleDevice{Rate: d.Rate} // the type is 32bit on mips d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert throttleDevices = append(throttleDevices, d) } return throttleDevices, nil } // adjustParallelLimit takes a number of objects and a proposed limit and // figures out if it's reasonable (and adjusts it accordingly). This is only // used for daemon startup, which does a lot of parallel loading of containers // (and if we exceed RLIMIT_NOFILE then we're in trouble). func adjustParallelLimit(n int, limit int) int { // Rule-of-thumb overhead factor (how many files will each goroutine open // simultaneously). Yes, this is ugly but to be frank this whole thing is // ugly. const overhead = 2 // On Linux, we need to ensure that parallelStartupJobs doesn't cause us to // exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it // and give a warning (since in theory the user should increase their // ulimits to the largest possible value for dockerd). var rlim unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err) return limit } softRlimit := int(rlim.Cur) // Much fewer containers than RLIMIT_NOFILE. No need to adjust anything. if softRlimit > overhead*n { return limit } // RLIMIT_NOFILE big enough, no need to adjust anything. if softRlimit > overhead*limit { return limit } logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit) return softRlimit / overhead } func checkKernel() error { // Check for unsupported kernel versions // FIXME: it would be cleaner to not test for specific versions, but rather // test for specific functionalities. // Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.10 crashes are clearer. // For details see https://github.com/docker/docker/issues/407 // Docker 1.11 and above doesn't actually run on kernels older than 3.4, // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). if !kernel.CheckKernelVersion(3, 10, 0) { v, _ := kernel.GetKernelVersion() if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) } } return nil } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if adjustCPUShares && hostConfig.CPUShares > 0 { // Handle unsupported CPUShares if hostConfig.CPUShares < linuxMinCPUShares { logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) hostConfig.CPUShares = linuxMinCPUShares } else if hostConfig.CPUShares > linuxMaxCPUShares { logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) hostConfig.CPUShares = linuxMaxCPUShares } } if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { // By default, MemorySwap is set to twice the size of Memory. 
hostConfig.MemorySwap = hostConfig.Memory * 2 } if hostConfig.ShmSize == 0 { hostConfig.ShmSize = config.DefaultShmSize if daemon.configStore != nil { hostConfig.ShmSize = int64(daemon.configStore.ShmSize) } } // Set default IPC mode, if unset for container if hostConfig.IpcMode.IsEmpty() { m := config.DefaultIpcMode if daemon.configStore != nil { m = containertypes.IpcMode(daemon.configStore.IpcMode) } hostConfig.IpcMode = m } // Set default cgroup namespace mode, if unset for container if hostConfig.CgroupnsMode.IsEmpty() { // for cgroup v2: unshare cgroupns even for privileged containers // https://github.com/containers/libpod/pull/4374#issuecomment-549776387 if hostConfig.Privileged && cgroups.Mode() != cgroups.Unified { hostConfig.CgroupnsMode = containertypes.CgroupnsModeHost } else { m := containertypes.CgroupnsModeHost if cgroups.Mode() == cgroups.Unified { m = containertypes.CgroupnsModePrivate } if daemon.configStore != nil { m = containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode) } hostConfig.CgroupnsMode = m } } adaptSharedNamespaceContainer(daemon, hostConfig) var err error secOpts, err := daemon.generateSecurityOpt(hostConfig) if err != nil { return err } hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...) if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable } return nil } // adaptSharedNamespaceContainer replaces container name with its ID in hostConfig. // To be more precisely, it modifies `container:name` to `container:ID` of PidMode, IpcMode // and NetworkMode. // // When a container shares its namespace with another container, use ID can keep the namespace // sharing connection between the two containers even the another container is renamed. func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) { containerPrefix := "container:" if hostConfig.PidMode.IsContainer() { pidContainer := hostConfig.PidMode.Container() // if there is any error returned here, we just ignore it and leave it to be // handled in the following logic if c, err := daemon.GetContainer(pidContainer); err == nil { hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID) } } if hostConfig.IpcMode.IsContainer() { ipcContainer := hostConfig.IpcMode.Container() if c, err := daemon.GetContainer(ipcContainer); err == nil { hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID) } } if hostConfig.NetworkMode.IsContainer() { netContainer := hostConfig.NetworkMode.ConnectedContainer() if c, err := daemon.GetContainer(netContainer); err == nil { hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID) } } } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) { fixMemorySwappiness(resources) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory limit allowed is 6MB") } if resources.Memory > 0 && !sysInfo.MemoryLimit { warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") resources.Memory = 0 resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") } if resources.Memory == 0 && resources.MemorySwap > 0 && !update { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") } if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness if swappiness < 0 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) } } if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.MemoryReservation = 0 } if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory reservation allowed is 6MB") } if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") } if resources.KernelMemory > 0 { // Kernel memory limit is not supported on cgroup v2. // Even on cgroup v1, kernel memory limit (`kmem.limit_in_bytes`) has been deprecated since kernel 5.4. // https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7 warnings = append(warnings, "Specifying a kernel memory limit is deprecated and will be removed in a future release.") } if resources.KernelMemory > 0 && !sysInfo.KernelMemory { warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.KernelMemory = 0 } if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") } if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") } if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point // warning the caller if they already wanted the feature to be off if *resources.OomKillDisable { warnings = append(warnings, "Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") } resources.OomKillDisable = nil } if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 { warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.") } if resources.PidsLimit != nil && !sysInfo.PidsLimit { if *resources.PidsLimit > 0 { warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.") } resources.PidsLimit = nil } // cpu subsystem checks and adjustments if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") } if resources.NanoCPUs > 0 && !sysInfo.CPUCfs { return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU CFS scheduler or the cgroup is not mounted") } // The highest precision we could get on Linux is 0.001, by setting // cpu.cfs_period_us=1000ms // cpu.cfs_quota=1ms // See the following link for details: // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. // The error message is 0.01 so that this is consistent with Windows if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.CPUShares > 0 && !sysInfo.CPUShares { warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") resources.CPUShares = 0 } if (resources.CPUPeriod != 0 || resources.CPUQuota != 0) && !sysInfo.CPUCfs { warnings = append(warnings, "Your kernel does not support CPU CFS scheduler. CPU period/quota discarded.") resources.CPUPeriod = 0 resources.CPUQuota = 0 } if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") } if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") } if resources.CPUPercent > 0 { warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) resources.CPUPercent = 0 } // cpuset subsystem checks and adjustments if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. 
Cpuset discarded.") resources.CpusetCpus = "" resources.CpusetMems = "" } cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus) } if !cpusAvailable { return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) } memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems) } if !memsAvailable { return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) } // blkio subsystem checks and adjustments if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") resources.BlkioWeight = 0 } if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") } if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) } if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} } if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} } return warnings, nil } func (daemon *Daemon) getCgroupDriver() string { if UsingSystemd(daemon.configStore) { return cgroupSystemdDriver } if daemon.Rootless() { return cgroupNoneDriver } return cgroupFsDriver } // getCD gets the raw value of the native.cgroupdriver option, if set. 
func getCD(config *config.Config) string { for _, option := range config.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { continue } return val } return "" } // verifyCgroupDriver validates native.cgroupdriver func verifyCgroupDriver(config *config.Config) error { cd := getCD(config) if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { return nil } if cd == cgroupNoneDriver { return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd) } return fmt.Errorf("native.cgroupdriver option %s not supported", cd) } // UsingSystemd returns true if cli option includes native.cgroupdriver=systemd func UsingSystemd(config *config.Config) bool { cd := getCD(config) if cd == cgroupSystemdDriver { return true } // On cgroup v2 hosts, default to systemd driver if cd == "" && cgroups.Mode() == cgroups.Unified && isRunningSystemd() { return true } return false } var ( runningSystemd bool detectSystemd sync.Once ) // isRunningSystemd checks whether the host was booted with systemd as its init // system. This functions similarly to systemd's `sd_booted(3)`: internally, it // checks whether /run/systemd/system/ exists and is a directory. // http://www.freedesktop.org/software/systemd/man/sd_booted.html // // NOTE: This function comes from package github.com/coreos/go-systemd/util // It was borrowed here to avoid a dependency on cgo. func isRunningSystemd() bool { detectSystemd.Do(func() { fi, err := os.Lstat("/run/systemd/system") if err != nil { return } runningSystemd = fi.IsDir() }) return runningSystemd } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } sysInfo := daemon.RawSysInfo() w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update) // no matter err is nil or not, w could have data in itself. warnings = append(warnings, w...) if err != nil { return warnings, err } if hostConfig.ShmSize < 0 { return warnings, fmt.Errorf("SHM size can not be less than 0") } if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) } // ip-forwarding does not affect container with '--net=host' (or '--net=none') if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") } if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 { warnings = append(warnings, "Published ports are discarded when using host network mode") } // check for various conflicting options with user namespaces if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { if hostConfig.Privileged { return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. 
You must run the container in the host namespace when running privileged mode") } if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled") } if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled") } } if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { // CgroupParent for systemd cgroup should be named as "xxx.slice" if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if hostConfig.Runtime == "" { hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() } if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) } parser := volumemounts.NewParser() for dest := range hostConfig.Tmpfs { if err := parser.ValidateTmpfsMountDestination(dest); err != nil { return warnings, err } } if !hostConfig.CgroupnsMode.Valid() { return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode) } if hostConfig.CgroupnsMode.IsPrivate() { if !sysInfo.CgroupNamespaces { warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.") } } if hostConfig.Runtime == config.LinuxV1RuntimeName || (hostConfig.Runtime == "" && daemon.configStore.DefaultRuntime == config.LinuxV1RuntimeName) { warnings = append(warnings, fmt.Sprintf("Configured runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName)) } return warnings, nil } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(conf *config.Config) error { if conf.ContainerdNamespace == conf.ContainerdPluginNamespace { return errors.New("containers namespace and plugins namespace cannot be the same") } // Check for mutually incompatible config options if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") } if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") } if conf.BridgeConfig.EnableIP6Tables && !conf.Experimental { return fmt.Errorf("ip6tables rules are only available if experimental features are enabled") } if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { conf.BridgeConfig.EnableIPMasq = false } if err := verifyCgroupDriver(conf); err != nil { return err } if conf.CgroupParent != "" && UsingSystemd(conf) { if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified { return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode") } configureRuntimes(conf) if rtName := conf.GetDefaultRuntimeName(); rtName != "" { if conf.GetRuntime(rtName) == nil { return fmt.Errorf("specified default runtime '%s' does not exist", rtName) } if rtName == config.LinuxV1RuntimeName { logrus.Warnf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName) } } return nil } // checkSystem validates platform-specific requirements func checkSystem() error { return checkKernel() } // configureMaxThreads sets the Go runtime max threads threshold // which is 90% of the kernel setting from /proc/sys/kernel/threads-max func configureMaxThreads(config *config.Config) error { mt, err := os.ReadFile("/proc/sys/kernel/threads-max") if err != nil { return err } mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) if err != nil { return err } maxThreads := (mtint / 100) * 90 debug.SetMaxThreads(maxThreads) logrus.Debugf("Golang's threads limit set to %d", maxThreads) return nil } func overlaySupportsSelinux() (bool, error) { f, err := os.Open("/proc/kallsyms") if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), " security_inode_copy_up") { return true, nil } } return false, s.Err() } // configureKernelSecuritySupport configures and validates security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { if config.EnableSelinuxSupport { if !selinux.GetEnabled() { logrus.Warn("Docker could not enable SELinux on the host system") return nil } if driverName == "overlay" || driverName == "overlay2" { // If driver is overlay or overlay2, make sure kernel // supports selinux with overlay. supported, err := overlaySupportsSelinux() if err != nil { return err } if !supported { logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) } } } else { selinux.SetDisabled() } return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } if len(activeSandboxes) > 0 { logrus.Info("There are old running containers, the network config will not take affect") return controller, nil } // Initialize default network on "null" if n, _ := controller.NetworkByName("none"); n == nil { if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) } } // Initialize default network on "host" if n, _ := controller.NetworkByName("host"); n == nil { if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) } } // Clear stale bridge network if n, err := controller.NetworkByName("bridge"); err == nil { if err = n.Delete(); err != nil { return nil, fmt.Errorf("could not delete the default bridge network: %v", err) } if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled { removeDefaultBridgeInterface() } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } else { removeDefaultBridgeInterface() } // Set HostGatewayIP to the default bridge's IP if it is empty if daemon.configStore.HostGatewayIP == nil && controller != nil { if n, err := controller.NetworkByName("bridge"); err == nil { v4Info, v6Info := n.Info().IpamInfo() var gateway net.IP if len(v4Info) > 0 { gateway = v4Info[0].Gateway.IP } else if len(v6Info) > 0 { gateway = v6Info[0].Gateway.IP } daemon.configStore.HostGatewayIP = gateway } } return controller, nil } func driverOptions(config *config.Config) nwconfig.Option { return nwconfig.OptionDriverConfig("bridge", options.Generic{ netlabel.GenericData: options.Generic{ "EnableIPForwarding": config.BridgeConfig.EnableIPForward, "EnableIPTables": config.BridgeConfig.EnableIPTables, "EnableIP6Tables": config.BridgeConfig.EnableIP6Tables, "EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy, "UserlandProxyPath": config.BridgeConfig.UserlandProxyPath, }, }) } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { bridgeName := bridge.DefaultBridgeName if config.BridgeConfig.Iface != "" { bridgeName = config.BridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq), bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication), } // --ip processing if config.BridgeConfig.DefaultIP != nil { netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String() } ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) if err != nil { return errors.Wrap(err, "list bridge addresses failed") } nw := nwList[0] if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return errors.Wrap(err, "parse CIDR failed") } // Iterate through in case there are multiple addresses for the bridge for _, entry := range nwList { if fCIDR.Contains(entry.IP) { nw = entry break } } } ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() hip, _ := 
lntypes.GetHostPartIP(nw.IP, nw.Mask) if hip.IsGlobalUnicast() { ipamV4Conf.Gateway = nw.IP.String() } if config.BridgeConfig.IP != "" { ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP) if err != nil { return err } ipamV4Conf.PreferredPool = ipNet.String() ipamV4Conf.Gateway = ip.String() } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return err } ipamV4Conf.SubPool = fCIDR.String() } if config.BridgeConfig.DefaultGatewayIPv4 != nil { ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() } var ( deferIPv6Alloc bool ipamV6Conf *libnetwork.IpamConf ) if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" { return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6")) } else if config.BridgeConfig.FixedCIDRv6 != "" { _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) if err != nil { return err } // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has // at least 48 host bits, we need to guarantee the current behavior where the containers' // IPv6 addresses will be constructed based on the containers' interface MAC address. // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints // on this network until after the driver has created the endpoint and returned the // constructed address. Libnetwork will then reserve this address with the ipam driver. 
ones, _ := fCIDRv6.Mask.Size() deferIPv6Alloc = ones <= 80 ipamV6Conf = &libnetwork.IpamConf{ AuxAddresses: make(map[string]string), PreferredPool: fCIDRv6.String(), } // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 // address belongs to the same network, we need to inform libnetwork about it, so // that it can be reserved with IPAM and it will not be given away to somebody else for _, nw6 := range nw6List { if fCIDRv6.Contains(nw6.IP) { ipamV6Conf.Gateway = nw6.IP.String() break } } } if config.BridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} if ipamV6Conf != nil { v6Conf = append(v6Conf, ipamV6Conf) } // Initialize default network on "bridge" with the same name _, err = controller.NewNetwork("bridge", "bridge", "", libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), libnetwork.NetworkOptionDriverOpts(netOption), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) if err != nil { return fmt.Errorf("Error creating default \"bridge\" network: %v", err) } return nil } // Remove default bridge interface if present (--bridge=none use case) func removeDefaultBridgeInterface() { if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { if err := netlink.LinkDel(lnk); err != nil { logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) } } } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return func(initPath containerfs.ContainerFS) error { return initlayer.Setup(initPath, idMapping.RootPair()) } } // Parse the remapped root (user namespace) option, which can be one of: // username - valid username from /etc/passwd // username:groupname - valid username; valid groupname from /etc/group // uid - 32-bit unsigned int valid Linux UID value // uid:gid - uid value; 32-bit unsigned int Linux GID value // // If no groupname is specified, and a username is specified, an attempt // will be made to lookup a gid for that username as a groupname // // If names are used, they are verified to exist in passwd/group func parseRemappedRoot(usergrp string) (string, string, error) { var ( userID, groupID int username, groupname string ) idparts := strings.Split(usergrp, ":") if len(idparts) > 2 { return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) } if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { // must be a uid; take it as valid userID = int(uid) luser, err := idtools.LookupUID(userID) if err != nil { return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) } username = luser.Name if len(idparts) == 1 { // if the uid was numeric and no gid was specified, take the uid as the gid groupID = userID lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) } groupname = lgrp.Name } } else { lookupName := idparts[0] // special case: if the user specified "default", they want Docker to create or // use (after creation) the "dockremap" user/group for root remapping if lookupName == defaultIDSpecifier { lookupName = defaultRemappedID } luser, err := idtools.LookupUser(lookupName) if err != 
nil && idparts[0] != defaultIDSpecifier { // error if the name requested isn't the special "dockremap" ID return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) } else if err != nil { // special case-- if the username == "default", then we have been asked // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} // ranges will be used for the user and group mappings in user namespaced containers _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) if err == nil { return defaultRemappedID, defaultRemappedID, nil } return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) } username = luser.Name if len(idparts) == 1 { // we only have a string username, and no group specified; look up gid from username as group group, err := idtools.LookupGroup(lookupName) if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) } groupname = group.Name } } if len(idparts) == 2 { // groupname or gid is separately specified and must be resolved // to an unsigned 32-bit gid if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { // must be a gid, take it as valid groupID = int(gid) lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) } groupname = lgrp.Name } else { // not a number; attempt a lookup if _, err := idtools.LookupGroup(idparts[1]); err != nil { return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) } groupname = idparts[1] } } return username, groupname, nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { return nil, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { return nil, err } if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") return &idtools.IdentityMapping{}, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) mappings, err := idtools.NewIdentityMapping(username) if err != nil { return nil, errors.Wrap(err, "Can't create ID mappings") } return mappings, nil } return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools.Identity) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root // (e.g. mounted layers of a container) can traverse this path. // The user namespace support will create subdirectories for the remapped root host uid:gid // pair owned by that same uid:gid pair for proper write access to those needed metadata and // layer content subtrees. 
if _, err := os.Stat(rootDir); err == nil { // root current exists; verify the access bits are correct by setting them if err = os.Chmod(rootDir, 0711); err != nil { return err } } else if os.IsNotExist(err) { // no root exists yet, create it 0711 with root:root ownership if err := os.MkdirAll(rootDir, 0711); err != nil { return err } } id := idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: remappedRoot.GID} // First make sure the current root dir has the correct perms. if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root) } // if user namespaces are enabled we will create a subtree underneath the specified root // with any/all specified remapped root uid/gid options on the daemon creating // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } // we also need to verify that any pre-existing directories in the path to // the graphroot won't block access to remapped root--if any pre-existing directory // has strict permissions that don't allow "x", container start will fail, so // better to warn and fail now dirPath := config.Root for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if !idtools.CanAccess(dirPath, remappedRoot) { return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root) } } } if err := setupDaemonRootPropagation(config); err != nil { logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") } return nil } func setupDaemonRootPropagation(cfg *config.Config) error { rootParentMount, mountOptions, err := getSourceMount(cfg.Root) if err != nil { return errors.Wrap(err, "error getting daemon root's parent mount") } var cleanupOldFile bool cleanupFile := getUnmountOnShutdownPath(cfg) defer func() { if !cleanupOldFile { return } if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) { logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") } }() if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) { cleanupOldFile = true return nil } if err := mount.MakeShared(cfg.Root); err != nil { return errors.Wrap(err, "could not setup daemon root propagation to shared") } // check the case where this may have already been a mount to itself. // If so then the daemon only performed a remount and should not try to unmount this later. 
if rootParentMount == cfg.Root { cleanupOldFile = true return nil } if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil { return errors.Wrap(err, "error creating dir to store mount cleanup file") } if err := os.WriteFile(cleanupFile, nil, 0600); err != nil { return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown") } return nil } // getUnmountOnShutdownPath generates the path to used when writing the file that signals to the daemon that on shutdown // the daemon root should be unmounted. func getUnmountOnShutdownPath(config *config.Config) string { return filepath.Join(config.ExecRoot, "unmount-on-shutdown") } // registerLinks writes the links to a file. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { return nil } for _, l := range hostConfig.Links { name, alias, err := opts.ParseLink(l) if err != nil { return err } child, err := daemon.GetContainer(name) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "could not get container for %s", name) } for child.HostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) child, err = daemon.GetContainer(parts[1]) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "Could not get container for %s", parts[1]) } } if child.HostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } if err := daemon.registerLink(container, child, alias); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig _, err := container.WriteHostConfig() return err } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { return daemon.Unmount(container) } func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry { out := make([]types.BlkioStatEntry, len(entries)) for i, re := range entries { out[i] = types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: re.Op, Value: re.Value, } } return out } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } cs, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } s := &types.StatsJSON{} s.Read = cs.Read stats := cs.Metrics switch t := stats.(type) { case *statsV1.Metrics: return daemon.statsV1(s, t) case *statsV2.Metrics: return daemon.statsV2(s, t) default: return nil, errors.Errorf("unexpected type of metrics %+v", t) } } func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) { if stats.Blkio != nil { s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive), IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive), IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive), IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive), IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive), IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive), IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive), SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive), } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.Usage.Total, PercpuUsage: stats.CPU.Usage.PerCPU, UsageInKernelmode: stats.CPU.Usage.Kernel, UsageInUsermode: stats.CPU.Usage.User, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.Throttling.Periods, ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods, ThrottledTime: stats.CPU.Throttling.ThrottledTime, }, } } if stats.Memory != nil { raw := map[string]uint64{ "cache": stats.Memory.Cache, "rss": stats.Memory.RSS, "rss_huge": stats.Memory.RSSHuge, "mapped_file": stats.Memory.MappedFile, "dirty": stats.Memory.Dirty, "writeback": stats.Memory.Writeback, "pgpgin": stats.Memory.PgPgIn, "pgpgout": stats.Memory.PgPgOut, "pgfault": stats.Memory.PgFault, "pgmajfault": stats.Memory.PgMajFault, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "hierarchical_memory_limit": stats.Memory.HierarchicalMemoryLimit, "hierarchical_memsw_limit": stats.Memory.HierarchicalSwapLimit, "total_cache": stats.Memory.TotalCache, "total_rss": stats.Memory.TotalRSS, "total_rss_huge": stats.Memory.TotalRSSHuge, "total_mapped_file": stats.Memory.TotalMappedFile, "total_dirty": stats.Memory.TotalDirty, "total_writeback": stats.Memory.TotalWriteback, "total_pgpgin": stats.Memory.TotalPgPgIn, "total_pgpgout": stats.Memory.TotalPgPgOut, "total_pgfault": stats.Memory.TotalPgFault, "total_pgmajfault": stats.Memory.TotalPgMajFault, "total_inactive_anon": stats.Memory.TotalInactiveAnon, "total_active_anon": stats.Memory.TotalActiveAnon, "total_inactive_file": stats.Memory.TotalInactiveFile, "total_active_file": stats.Memory.TotalActiveFile, "total_unevictable": stats.Memory.TotalUnevictable, } if 
stats.Memory.Usage != nil { s.MemoryStats = types.MemoryStats{ Stats: raw, Usage: stats.Memory.Usage.Usage, MaxUsage: stats.Memory.Usage.Max, Limit: stats.Memory.Usage.Limit, Failcnt: stats.Memory.Usage.Failcnt, } } else { s.MemoryStats = types.MemoryStats{ Stats: raw, } } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) { if stats.Io != nil { var isbr []types.BlkioStatEntry for _, re := range stats.Io.Usage { isbr = append(isbr, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "read", Value: re.Rbytes, }, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "write", Value: re.Wbytes, }, ) } s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: isbr, // Other fields are unsupported } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.UsageUsec * 1000, // PercpuUsage is not supported UsageInKernelmode: stats.CPU.SystemUsec * 1000, UsageInUsermode: stats.CPU.UserUsec * 1000, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.NrPeriods, ThrottledPeriods: stats.CPU.NrThrottled, ThrottledTime: stats.CPU.ThrottledUsec * 1000, }, } } if stats.Memory != nil { s.MemoryStats = types.MemoryStats{ // Stats is not compatible with v1 Stats: map[string]uint64{ "anon": stats.Memory.Anon, "file": stats.Memory.File, "kernel_stack": stats.Memory.KernelStack, "slab": stats.Memory.Slab, "sock": stats.Memory.Sock, "shmem": stats.Memory.Shmem, "file_mapped": stats.Memory.FileMapped, "file_dirty": stats.Memory.FileDirty, "file_writeback": stats.Memory.FileWriteback, "anon_thp": stats.Memory.AnonThp, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "slab_reclaimable": stats.Memory.SlabReclaimable, "slab_unreclaimable": stats.Memory.SlabUnreclaimable, "pgfault": stats.Memory.Pgfault, "pgmajfault": stats.Memory.Pgmajfault, "workingset_refault": stats.Memory.WorkingsetRefault, "workingset_activate": stats.Memory.WorkingsetActivate, "workingset_nodereclaim": stats.Memory.WorkingsetNodereclaim, "pgrefill": stats.Memory.Pgrefill, "pgscan": stats.Memory.Pgscan, "pgsteal": stats.Memory.Pgsteal, "pgactivate": stats.Memory.Pgactivate, "pgdeactivate": stats.Memory.Pgdeactivate, "pglazyfree": stats.Memory.Pglazyfree, "pglazyfreed": stats.Memory.Pglazyfreed, "thp_fault_alloc": stats.Memory.ThpFaultAlloc, "thp_collapse_alloc": stats.Memory.ThpCollapseAlloc, }, Usage: stats.Memory.Usage, // MaxUsage is not supported Limit: stats.Memory.UsageLimit, } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } if stats.MemoryEvents != nil { // Failcnt is set to the "oom" field of the "memory.events" file. 
			// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
			s.MemoryStats.Failcnt = stats.MemoryEvents.Oom
		}
	}
	if stats.Pids != nil {
		s.PidsStats = types.PidsStats{
			Current: stats.Pids.Current,
			Limit:   stats.Pids.Limit,
		}
	}
	return s, nil
}

// setDefaultIsolation determines the default isolation mode for the
// daemon to run in. This is only applicable on Windows
func (daemon *Daemon) setDefaultIsolation() error {
	return nil
}

// setupDaemonProcess sets various settings for the daemon's process
func setupDaemonProcess(config *config.Config) error {
	// set up the daemon's oom_score_adj
	if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil {
		return err
	}
	if err := setMayDetachMounts(); err != nil {
		logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
	}
	return nil
}

// This is used to allow removal of mountpoints that may be mounted in other
// namespaces on RHEL based kernels starting from RHEL 7.4.
// Without this setting, removals on these RHEL based kernels may fail with
// "device or resource busy".
// This setting is not available in upstream kernels as it is not configurable,
// but has been in the upstream kernels since 3.15.
func setMayDetachMounts() error {
	f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return errors.Wrap(err, "error opening may_detach_mounts kernel config file")
	}
	defer f.Close()

	_, err = f.WriteString("1")
	if os.IsPermission(err) {
		// Setting may_detach_mounts does not work in an
		// unprivileged container. Ignore the error, but log
		// it if we appear not to be in that situation.
		if !userns.RunningInUserNS() {
			logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
		}
		return nil
	}
	return err
}

func setupOOMScoreAdj(score int) error {
	if score == 0 {
		return nil
	}
	f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	defer f.Close()
	stringScore := strconv.Itoa(score)
	_, err = f.WriteString(stringScore)
	if os.IsPermission(err) {
		// Setting oom_score_adj does not work in an
		// unprivileged container. Ignore the error, but log
		// it if we appear not to be in that situation.
		if !userns.RunningInUserNS() {
			logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore)
		}
		return nil
	}
	return err
}

func (daemon *Daemon) initCPURtController(mnt, path string) error {
	if path == "/" || path == "." {
		return nil
	}

	// Recursively create cgroup to ensure that the system and all parent cgroups have values set
	// for the period and runtime as this limits what the children can be set to.
	if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil {
		return err
	}

	path = filepath.Join(mnt, path)
	if err := os.MkdirAll(path, 0755); err != nil {
		return err
	}
	if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
		return err
	}
	return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
}

func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error {
	if configValue == 0 {
		return nil
	}
	return os.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700)
}

func (daemon *Daemon) setupSeccompProfile() error {
	switch profile := daemon.configStore.SeccompProfile; profile {
	case "", config.SeccompProfileDefault:
		daemon.seccompProfilePath = config.SeccompProfileDefault
	case config.SeccompProfileUnconfined:
		daemon.seccompProfilePath = config.SeccompProfileUnconfined
	default:
		daemon.seccompProfilePath = profile
		b, err := os.ReadFile(profile)
		if err != nil {
			return fmt.Errorf("opening seccomp profile (%s) failed: %v", profile, err)
		}
		daemon.seccompProfile = b
	}
	return nil
}

// RawSysInfo returns *sysinfo.SysInfo .
func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo {
	var siOpts []sysinfo.Opt
	if daemon.getCgroupDriver() == cgroupSystemdDriver {
		if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" {
			siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice"))
		}
	}
	return sysinfo.New(siOpts...)
}

func recursiveUnmount(target string) error {
	return mount.RecursiveUnmount(target)
}

func (daemon *Daemon) initLibcontainerd(ctx context.Context) error {
	var err error
	daemon.containerd, err = remote.NewClient(
		ctx,
		daemon.containerdCli,
		filepath.Join(daemon.configStore.ExecRoot, "containerd"),
		daemon.configStore.ContainerdNamespace,
		daemon,
	)
	return err
}
//go:build linux || freebsd // +build linux freebsd package daemon // import "github.com/docker/docker/daemon" import ( "bufio" "context" "fmt" "net" "os" "path/filepath" "runtime" "runtime/debug" "strconv" "strings" "sync" "time" "github.com/containerd/cgroups" statsV1 "github.com/containerd/cgroups/stats/v1" statsV2 "github.com/containerd/cgroups/v2/stats" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/blkiodev" pblkiodev "github.com/docker/docker/api/types/blkiodev" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/drivers/bridge" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/options" lntypes "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" volumemounts "github.com/docker/docker/volume/mounts" "github.com/moby/sys/mount" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) const ( isWindows = false // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 linuxMinCPUShares = 2 linuxMaxCPUShares = 262144 platformSupported = true // It's not kernel limit, we want this 6M limit to account for overhead during startup, and to supply a reasonable functional container linuxMinMemory = 6291456 // constants for remapped root settings defaultIDSpecifier = "default" defaultRemappedID = "dockremap" // constant for cgroup drivers cgroupFsDriver = "cgroupfs" cgroupSystemdDriver = "systemd" cgroupNoneDriver = "none" ) type containerGetter interface { GetContainer(string) (*container.Container, error) } func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { memory := specs.LinuxMemory{} if config.Memory > 0 { memory.Limit = &config.Memory } if config.MemoryReservation > 0 { memory.Reservation = &config.MemoryReservation } if config.MemorySwap > 0 { memory.Swap = &config.MemorySwap } if config.MemorySwappiness != nil { swappiness := uint64(*config.MemorySwappiness) memory.Swappiness = &swappiness } if config.OomKillDisable != nil { memory.DisableOOMKiller = config.OomKillDisable } if config.KernelMemory != 0 { memory.Kernel = &config.KernelMemory } if config.KernelMemoryTCP != 0 { memory.KernelTCP = &config.KernelMemoryTCP } return &memory } func getPidsLimit(config containertypes.Resources) *specs.LinuxPids { if config.PidsLimit == nil { return nil } if *config.PidsLimit <= 0 { // docker API allows 0 and negative values to unset this to be consistent // with default values. When updating values, runc requires -1 to unset // the previous limit. 
return &specs.LinuxPids{Limit: -1} } return &specs.LinuxPids{Limit: *config.PidsLimit} } func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { cpu := specs.LinuxCPU{} if config.CPUShares < 0 { return nil, fmt.Errorf("shares: invalid argument") } if config.CPUShares >= 0 { shares := uint64(config.CPUShares) cpu.Shares = &shares } if config.CpusetCpus != "" { cpu.Cpus = config.CpusetCpus } if config.CpusetMems != "" { cpu.Mems = config.CpusetMems } if config.NanoCPUs > 0 { // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt period := uint64(100 * time.Millisecond / time.Microsecond) quota := config.NanoCPUs * int64(period) / 1e9 cpu.Period = &period cpu.Quota = &quota } if config.CPUPeriod != 0 { period := uint64(config.CPUPeriod) cpu.Period = &period } if config.CPUQuota != 0 { q := config.CPUQuota cpu.Quota = &q } if config.CPURealtimePeriod != 0 { period := uint64(config.CPURealtimePeriod) cpu.RealtimePeriod = &period } if config.CPURealtimeRuntime != 0 { c := config.CPURealtimeRuntime cpu.RealtimeRuntime = &c } return &cpu, nil } func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { var stat unix.Stat_t var blkioWeightDevices []specs.LinuxWeightDevice for _, weightDevice := range config.BlkioWeightDevice { if err := unix.Stat(weightDevice.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: weightDevice.Path, Err: err}) } weight := weightDevice.Weight d := specs.LinuxWeightDevice{Weight: &weight} // The type is 32bit on mips. d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert blkioWeightDevices = append(blkioWeightDevices, d) } return blkioWeightDevices, nil } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { container.NoNewPrivileges = daemon.configStore.NoNewPrivileges return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { var ( labelOpts []string err error ) for _, opt := range config.SecurityOpt { if opt == "no-new-privileges" { container.NoNewPrivileges = true continue } if opt == "disable" { labelOpts = append(labelOpts, "disable") continue } var con []string if strings.Contains(opt, "=") { con = strings.SplitN(opt, "=", 2) } else if strings.Contains(opt, ":") { con = strings.SplitN(opt, ":", 2) logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") } if len(con) != 2 { return fmt.Errorf("invalid --security-opt 1: %q", opt) } switch con[0] { case "label": labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] case "seccomp": container.SeccompProfile = con[1] case "no-new-privileges": noNewPrivileges, err := strconv.ParseBool(con[1]) if err != nil { return fmt.Errorf("invalid --security-opt 2: %q", opt) } container.NoNewPrivileges = noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } } container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { var throttleDevices []specs.LinuxThrottleDevice var stat unix.Stat_t for _, d := range devs { if err := unix.Stat(d.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: d.Path, 
Err: err}) } d := specs.LinuxThrottleDevice{Rate: d.Rate} // the type is 32bit on mips d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert throttleDevices = append(throttleDevices, d) } return throttleDevices, nil } // adjustParallelLimit takes a number of objects and a proposed limit and // figures out if it's reasonable (and adjusts it accordingly). This is only // used for daemon startup, which does a lot of parallel loading of containers // (and if we exceed RLIMIT_NOFILE then we're in trouble). func adjustParallelLimit(n int, limit int) int { // Rule-of-thumb overhead factor (how many files will each goroutine open // simultaneously). Yes, this is ugly but to be frank this whole thing is // ugly. const overhead = 2 // On Linux, we need to ensure that parallelStartupJobs doesn't cause us to // exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it // and give a warning (since in theory the user should increase their // ulimits to the largest possible value for dockerd). var rlim unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err) return limit } softRlimit := int(rlim.Cur) // Much fewer containers than RLIMIT_NOFILE. No need to adjust anything. if softRlimit > overhead*n { return limit } // RLIMIT_NOFILE big enough, no need to adjust anything. if softRlimit > overhead*limit { return limit } logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit) return softRlimit / overhead } func checkKernel() error { // Check for unsupported kernel versions // FIXME: it would be cleaner to not test for specific versions, but rather // test for specific functionalities. // Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.10 crashes are clearer. // For details see https://github.com/docker/docker/issues/407 // Docker 1.11 and above doesn't actually run on kernels older than 3.4, // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). if !kernel.CheckKernelVersion(3, 10, 0) { v, _ := kernel.GetKernelVersion() if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) } } return nil } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if adjustCPUShares && hostConfig.CPUShares > 0 { // Handle unsupported CPUShares if hostConfig.CPUShares < linuxMinCPUShares { logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) hostConfig.CPUShares = linuxMinCPUShares } else if hostConfig.CPUShares > linuxMaxCPUShares { logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) hostConfig.CPUShares = linuxMaxCPUShares } } if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { // By default, MemorySwap is set to twice the size of Memory. 
hostConfig.MemorySwap = hostConfig.Memory * 2 } if hostConfig.ShmSize == 0 { hostConfig.ShmSize = config.DefaultShmSize if daemon.configStore != nil { hostConfig.ShmSize = int64(daemon.configStore.ShmSize) } } // Set default IPC mode, if unset for container if hostConfig.IpcMode.IsEmpty() { m := config.DefaultIpcMode if daemon.configStore != nil { m = containertypes.IpcMode(daemon.configStore.IpcMode) } hostConfig.IpcMode = m } // Set default cgroup namespace mode, if unset for container if hostConfig.CgroupnsMode.IsEmpty() { // for cgroup v2: unshare cgroupns even for privileged containers // https://github.com/containers/libpod/pull/4374#issuecomment-549776387 if hostConfig.Privileged && cgroups.Mode() != cgroups.Unified { hostConfig.CgroupnsMode = containertypes.CgroupnsModeHost } else { m := containertypes.CgroupnsModeHost if cgroups.Mode() == cgroups.Unified { m = containertypes.CgroupnsModePrivate } if daemon.configStore != nil { m = containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode) } hostConfig.CgroupnsMode = m } } adaptSharedNamespaceContainer(daemon, hostConfig) var err error secOpts, err := daemon.generateSecurityOpt(hostConfig) if err != nil { return err } hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...) if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable } return nil } // adaptSharedNamespaceContainer replaces container name with its ID in hostConfig. // To be more precisely, it modifies `container:name` to `container:ID` of PidMode, IpcMode // and NetworkMode. // // When a container shares its namespace with another container, use ID can keep the namespace // sharing connection between the two containers even the another container is renamed. func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) { containerPrefix := "container:" if hostConfig.PidMode.IsContainer() { pidContainer := hostConfig.PidMode.Container() // if there is any error returned here, we just ignore it and leave it to be // handled in the following logic if c, err := daemon.GetContainer(pidContainer); err == nil { hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID) } } if hostConfig.IpcMode.IsContainer() { ipcContainer := hostConfig.IpcMode.Container() if c, err := daemon.GetContainer(ipcContainer); err == nil { hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID) } } if hostConfig.NetworkMode.IsContainer() { netContainer := hostConfig.NetworkMode.ConnectedContainer() if c, err := daemon.GetContainer(netContainer); err == nil { hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID) } } } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) { fixMemorySwappiness(resources) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory limit allowed is 6MB") } if resources.Memory > 0 && !sysInfo.MemoryLimit { warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") resources.Memory = 0 resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") } if resources.Memory == 0 && resources.MemorySwap > 0 && !update { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") } if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness if swappiness < 0 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) } } if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.MemoryReservation = 0 } if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory reservation allowed is 6MB") } if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") } if resources.KernelMemory > 0 { // Kernel memory limit is not supported on cgroup v2. // Even on cgroup v1, kernel memory limit (`kmem.limit_in_bytes`) has been deprecated since kernel 5.4. // https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7 warnings = append(warnings, "Specifying a kernel memory limit is deprecated and will be removed in a future release.") } if resources.KernelMemory > 0 && !sysInfo.KernelMemory { warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.KernelMemory = 0 } if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") } if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") } if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point // warning the caller if they already wanted the feature to be off if *resources.OomKillDisable { warnings = append(warnings, "Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") } resources.OomKillDisable = nil } if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 { warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.") } if resources.PidsLimit != nil && !sysInfo.PidsLimit { if *resources.PidsLimit > 0 { warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.") } resources.PidsLimit = nil } // cpu subsystem checks and adjustments if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") } if resources.NanoCPUs > 0 && !sysInfo.CPUCfs { return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU CFS scheduler or the cgroup is not mounted") } // The highest precision we could get on Linux is 0.001, by setting // cpu.cfs_period_us=1000ms // cpu.cfs_quota=1ms // See the following link for details: // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. // The error message is 0.01 so that this is consistent with Windows if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.CPUShares > 0 && !sysInfo.CPUShares { warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") resources.CPUShares = 0 } if (resources.CPUPeriod != 0 || resources.CPUQuota != 0) && !sysInfo.CPUCfs { warnings = append(warnings, "Your kernel does not support CPU CFS scheduler. CPU period/quota discarded.") resources.CPUPeriod = 0 resources.CPUQuota = 0 } if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") } if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") } if resources.CPUPercent > 0 { warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) resources.CPUPercent = 0 } // cpuset subsystem checks and adjustments if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. 
Cpuset discarded.") resources.CpusetCpus = "" resources.CpusetMems = "" } cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus) } if !cpusAvailable { return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) } memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems) } if !memsAvailable { return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) } // blkio subsystem checks and adjustments if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") resources.BlkioWeight = 0 } if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") } if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) } if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} } if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} } return warnings, nil } func (daemon *Daemon) getCgroupDriver() string { if UsingSystemd(daemon.configStore) { return cgroupSystemdDriver } if daemon.Rootless() { return cgroupNoneDriver } return cgroupFsDriver } // getCD gets the raw value of the native.cgroupdriver option, if set. 
func getCD(config *config.Config) string { for _, option := range config.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { continue } return val } return "" } // verifyCgroupDriver validates native.cgroupdriver func verifyCgroupDriver(config *config.Config) error { cd := getCD(config) if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { return nil } if cd == cgroupNoneDriver { return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd) } return fmt.Errorf("native.cgroupdriver option %s not supported", cd) } // UsingSystemd returns true if cli option includes native.cgroupdriver=systemd func UsingSystemd(config *config.Config) bool { cd := getCD(config) if cd == cgroupSystemdDriver { return true } // On cgroup v2 hosts, default to systemd driver if cd == "" && cgroups.Mode() == cgroups.Unified && isRunningSystemd() { return true } return false } var ( runningSystemd bool detectSystemd sync.Once ) // isRunningSystemd checks whether the host was booted with systemd as its init // system. This functions similarly to systemd's `sd_booted(3)`: internally, it // checks whether /run/systemd/system/ exists and is a directory. // http://www.freedesktop.org/software/systemd/man/sd_booted.html // // NOTE: This function comes from package github.com/coreos/go-systemd/util // It was borrowed here to avoid a dependency on cgo. func isRunningSystemd() bool { detectSystemd.Do(func() { fi, err := os.Lstat("/run/systemd/system") if err != nil { return } runningSystemd = fi.IsDir() }) return runningSystemd } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } sysInfo := daemon.RawSysInfo() w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update) // no matter err is nil or not, w could have data in itself. warnings = append(warnings, w...) if err != nil { return warnings, err } if hostConfig.ShmSize < 0 { return warnings, fmt.Errorf("SHM size can not be less than 0") } if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) } // ip-forwarding does not affect container with '--net=host' (or '--net=none') if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") } if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 { warnings = append(warnings, "Published ports are discarded when using host network mode") } // check for various conflicting options with user namespaces if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { if hostConfig.Privileged { return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. 
You must run the container in the host namespace when running privileged mode") } if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled") } if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled") } } if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { // CgroupParent for systemd cgroup should be named as "xxx.slice" if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if hostConfig.Runtime == "" { hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() } if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) } parser := volumemounts.NewParser() for dest := range hostConfig.Tmpfs { if err := parser.ValidateTmpfsMountDestination(dest); err != nil { return warnings, err } } if !hostConfig.CgroupnsMode.Valid() { return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode) } if hostConfig.CgroupnsMode.IsPrivate() { if !sysInfo.CgroupNamespaces { warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.") } } if hostConfig.Runtime == config.LinuxV1RuntimeName || (hostConfig.Runtime == "" && daemon.configStore.DefaultRuntime == config.LinuxV1RuntimeName) { warnings = append(warnings, fmt.Sprintf("Configured runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName)) } return warnings, nil } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(conf *config.Config) error { if conf.ContainerdNamespace == conf.ContainerdPluginNamespace { return errors.New("containers namespace and plugins namespace cannot be the same") } // Check for mutually incompatible config options if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") } if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") } if conf.BridgeConfig.EnableIP6Tables && !conf.Experimental { return fmt.Errorf("ip6tables rules are only available if experimental features are enabled") } if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { conf.BridgeConfig.EnableIPMasq = false } if err := verifyCgroupDriver(conf); err != nil { return err } if conf.CgroupParent != "" && UsingSystemd(conf) { if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified { return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode") } configureRuntimes(conf) if rtName := conf.GetDefaultRuntimeName(); rtName != "" { if conf.GetRuntime(rtName) == nil { return fmt.Errorf("specified default runtime '%s' does not exist", rtName) } if rtName == config.LinuxV1RuntimeName { logrus.Warnf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName) } } return nil } // checkSystem validates platform-specific requirements func checkSystem() error { return checkKernel() } // configureMaxThreads sets the Go runtime max threads threshold // which is 90% of the kernel setting from /proc/sys/kernel/threads-max func configureMaxThreads(config *config.Config) error { mt, err := os.ReadFile("/proc/sys/kernel/threads-max") if err != nil { return err } mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) if err != nil { return err } maxThreads := (mtint / 100) * 90 debug.SetMaxThreads(maxThreads) logrus.Debugf("Golang's threads limit set to %d", maxThreads) return nil } func overlaySupportsSelinux() (bool, error) { f, err := os.Open("/proc/kallsyms") if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), " security_inode_copy_up") { return true, nil } } return false, s.Err() } // configureKernelSecuritySupport configures and validates security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { if config.EnableSelinuxSupport { if !selinux.GetEnabled() { logrus.Warn("Docker could not enable SELinux on the host system") return nil } if driverName == "overlay" || driverName == "overlay2" { // If driver is overlay or overlay2, make sure kernel // supports selinux with overlay. supported, err := overlaySupportsSelinux() if err != nil { return err } if !supported { logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) } } } else { selinux.SetDisabled() } return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } if len(activeSandboxes) > 0 { logrus.Info("There are old running containers, the network config will not take affect") setHostGatewayIP(daemon.configStore, controller) return controller, nil } // Initialize default network on "null" if n, _ := controller.NetworkByName("none"); n == nil { if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) } } // Initialize default network on "host" if n, _ := controller.NetworkByName("host"); n == nil { if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) } } // Clear stale bridge network if n, err := controller.NetworkByName("bridge"); err == nil { if err = n.Delete(); err != nil { return nil, fmt.Errorf("could not delete the default bridge network: %v", err) } if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled { removeDefaultBridgeInterface() } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } else { removeDefaultBridgeInterface() } // Set HostGatewayIP to the default bridge's IP if it is empty setHostGatewayIP(daemon.configStore, controller) return controller, nil } // setHostGatewayIP sets cfg.HostGatewayIP to the default bridge's IP if it is empty. func setHostGatewayIP(config *config.Config, controller libnetwork.NetworkController) { if config.HostGatewayIP != nil { return } if n, err := controller.NetworkByName("bridge"); err == nil { v4Info, v6Info := n.Info().IpamInfo() var gateway net.IP if len(v4Info) > 0 { gateway = v4Info[0].Gateway.IP } else if len(v6Info) > 0 { gateway = v6Info[0].Gateway.IP } config.HostGatewayIP = gateway } } func driverOptions(config *config.Config) nwconfig.Option { return nwconfig.OptionDriverConfig("bridge", options.Generic{ netlabel.GenericData: options.Generic{ "EnableIPForwarding": config.BridgeConfig.EnableIPForward, "EnableIPTables": config.BridgeConfig.EnableIPTables, "EnableIP6Tables": config.BridgeConfig.EnableIP6Tables, "EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy, "UserlandProxyPath": config.BridgeConfig.UserlandProxyPath, }, }) } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { bridgeName := bridge.DefaultBridgeName if config.BridgeConfig.Iface != "" { bridgeName = config.BridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq), bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication), } // --ip processing if config.BridgeConfig.DefaultIP != nil { netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String() } ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) if err != nil { return errors.Wrap(err, "list bridge addresses failed") } nw := nwList[0] if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return errors.Wrap(err, "parse CIDR 
failed") } // Iterate through in case there are multiple addresses for the bridge for _, entry := range nwList { if fCIDR.Contains(entry.IP) { nw = entry break } } } ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) if hip.IsGlobalUnicast() { ipamV4Conf.Gateway = nw.IP.String() } if config.BridgeConfig.IP != "" { ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP) if err != nil { return err } ipamV4Conf.PreferredPool = ipNet.String() ipamV4Conf.Gateway = ip.String() } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return err } ipamV4Conf.SubPool = fCIDR.String() } if config.BridgeConfig.DefaultGatewayIPv4 != nil { ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() } var ( deferIPv6Alloc bool ipamV6Conf *libnetwork.IpamConf ) if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" { return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6")) } else if config.BridgeConfig.FixedCIDRv6 != "" { _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) if err != nil { return err } // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has // at least 48 host bits, we need to guarantee the current behavior where the containers' // IPv6 addresses will be constructed based on the containers' interface MAC address. // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints // on this network until after the driver has created the endpoint and returned the // constructed address. Libnetwork will then reserve this address with the ipam driver. 
ones, _ := fCIDRv6.Mask.Size() deferIPv6Alloc = ones <= 80 ipamV6Conf = &libnetwork.IpamConf{ AuxAddresses: make(map[string]string), PreferredPool: fCIDRv6.String(), } // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 // address belongs to the same network, we need to inform libnetwork about it, so // that it can be reserved with IPAM and it will not be given away to somebody else for _, nw6 := range nw6List { if fCIDRv6.Contains(nw6.IP) { ipamV6Conf.Gateway = nw6.IP.String() break } } } if config.BridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} if ipamV6Conf != nil { v6Conf = append(v6Conf, ipamV6Conf) } // Initialize default network on "bridge" with the same name _, err = controller.NewNetwork("bridge", "bridge", "", libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), libnetwork.NetworkOptionDriverOpts(netOption), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) if err != nil { return fmt.Errorf("Error creating default \"bridge\" network: %v", err) } return nil } // Remove default bridge interface if present (--bridge=none use case) func removeDefaultBridgeInterface() { if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { if err := netlink.LinkDel(lnk); err != nil { logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) } } } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return func(initPath containerfs.ContainerFS) error { return initlayer.Setup(initPath, idMapping.RootPair()) } } // Parse the remapped root (user namespace) option, which can be one of: // username - valid username from /etc/passwd // username:groupname - valid username; valid groupname from /etc/group // uid - 32-bit unsigned int valid Linux UID value // uid:gid - uid value; 32-bit unsigned int Linux GID value // // If no groupname is specified, and a username is specified, an attempt // will be made to lookup a gid for that username as a groupname // // If names are used, they are verified to exist in passwd/group func parseRemappedRoot(usergrp string) (string, string, error) { var ( userID, groupID int username, groupname string ) idparts := strings.Split(usergrp, ":") if len(idparts) > 2 { return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) } if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { // must be a uid; take it as valid userID = int(uid) luser, err := idtools.LookupUID(userID) if err != nil { return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) } username = luser.Name if len(idparts) == 1 { // if the uid was numeric and no gid was specified, take the uid as the gid groupID = userID lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) } groupname = lgrp.Name } } else { lookupName := idparts[0] // special case: if the user specified "default", they want Docker to create or // use (after creation) the "dockremap" user/group for root remapping if lookupName == defaultIDSpecifier { lookupName = defaultRemappedID } luser, err := idtools.LookupUser(lookupName) if err != 
nil && idparts[0] != defaultIDSpecifier { // error if the name requested isn't the special "dockremap" ID return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) } else if err != nil { // special case-- if the username == "default", then we have been asked // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} // ranges will be used for the user and group mappings in user namespaced containers _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) if err == nil { return defaultRemappedID, defaultRemappedID, nil } return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) } username = luser.Name if len(idparts) == 1 { // we only have a string username, and no group specified; look up gid from username as group group, err := idtools.LookupGroup(lookupName) if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) } groupname = group.Name } } if len(idparts) == 2 { // groupname or gid is separately specified and must be resolved // to an unsigned 32-bit gid if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { // must be a gid, take it as valid groupID = int(gid) lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) } groupname = lgrp.Name } else { // not a number; attempt a lookup if _, err := idtools.LookupGroup(idparts[1]); err != nil { return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) } groupname = idparts[1] } } return username, groupname, nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { return nil, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { return nil, err } if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") return &idtools.IdentityMapping{}, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) mappings, err := idtools.NewIdentityMapping(username) if err != nil { return nil, errors.Wrap(err, "Can't create ID mappings") } return mappings, nil } return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools.Identity) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root // (e.g. mounted layers of a container) can traverse this path. // The user namespace support will create subdirectories for the remapped root host uid:gid // pair owned by that same uid:gid pair for proper write access to those needed metadata and // layer content subtrees. 
if _, err := os.Stat(rootDir); err == nil { // root current exists; verify the access bits are correct by setting them if err = os.Chmod(rootDir, 0711); err != nil { return err } } else if os.IsNotExist(err) { // no root exists yet, create it 0711 with root:root ownership if err := os.MkdirAll(rootDir, 0711); err != nil { return err } } id := idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: remappedRoot.GID} // First make sure the current root dir has the correct perms. if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root) } // if user namespaces are enabled we will create a subtree underneath the specified root // with any/all specified remapped root uid/gid options on the daemon creating // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } // we also need to verify that any pre-existing directories in the path to // the graphroot won't block access to remapped root--if any pre-existing directory // has strict permissions that don't allow "x", container start will fail, so // better to warn and fail now dirPath := config.Root for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if !idtools.CanAccess(dirPath, remappedRoot) { return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root) } } } if err := setupDaemonRootPropagation(config); err != nil { logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") } return nil } func setupDaemonRootPropagation(cfg *config.Config) error { rootParentMount, mountOptions, err := getSourceMount(cfg.Root) if err != nil { return errors.Wrap(err, "error getting daemon root's parent mount") } var cleanupOldFile bool cleanupFile := getUnmountOnShutdownPath(cfg) defer func() { if !cleanupOldFile { return } if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) { logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") } }() if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) { cleanupOldFile = true return nil } if err := mount.MakeShared(cfg.Root); err != nil { return errors.Wrap(err, "could not setup daemon root propagation to shared") } // check the case where this may have already been a mount to itself. // If so then the daemon only performed a remount and should not try to unmount this later. 
if rootParentMount == cfg.Root { cleanupOldFile = true return nil } if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil { return errors.Wrap(err, "error creating dir to store mount cleanup file") } if err := os.WriteFile(cleanupFile, nil, 0600); err != nil { return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown") } return nil } // getUnmountOnShutdownPath generates the path to used when writing the file that signals to the daemon that on shutdown // the daemon root should be unmounted. func getUnmountOnShutdownPath(config *config.Config) string { return filepath.Join(config.ExecRoot, "unmount-on-shutdown") } // registerLinks writes the links to a file. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { return nil } for _, l := range hostConfig.Links { name, alias, err := opts.ParseLink(l) if err != nil { return err } child, err := daemon.GetContainer(name) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "could not get container for %s", name) } for child.HostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) child, err = daemon.GetContainer(parts[1]) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "Could not get container for %s", parts[1]) } } if child.HostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } if err := daemon.registerLink(container, child, alias); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig _, err := container.WriteHostConfig() return err } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { return daemon.Unmount(container) } func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry { out := make([]types.BlkioStatEntry, len(entries)) for i, re := range entries { out[i] = types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: re.Op, Value: re.Value, } } return out } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } cs, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } s := &types.StatsJSON{} s.Read = cs.Read stats := cs.Metrics switch t := stats.(type) { case *statsV1.Metrics: return daemon.statsV1(s, t) case *statsV2.Metrics: return daemon.statsV2(s, t) default: return nil, errors.Errorf("unexpected type of metrics %+v", t) } } func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) { if stats.Blkio != nil { s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive), IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive), IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive), IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive), IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive), IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive), IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive), SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive), } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.Usage.Total, PercpuUsage: stats.CPU.Usage.PerCPU, UsageInKernelmode: stats.CPU.Usage.Kernel, UsageInUsermode: stats.CPU.Usage.User, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.Throttling.Periods, ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods, ThrottledTime: stats.CPU.Throttling.ThrottledTime, }, } } if stats.Memory != nil { raw := map[string]uint64{ "cache": stats.Memory.Cache, "rss": stats.Memory.RSS, "rss_huge": stats.Memory.RSSHuge, "mapped_file": stats.Memory.MappedFile, "dirty": stats.Memory.Dirty, "writeback": stats.Memory.Writeback, "pgpgin": stats.Memory.PgPgIn, "pgpgout": stats.Memory.PgPgOut, "pgfault": stats.Memory.PgFault, "pgmajfault": stats.Memory.PgMajFault, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "hierarchical_memory_limit": stats.Memory.HierarchicalMemoryLimit, "hierarchical_memsw_limit": stats.Memory.HierarchicalSwapLimit, "total_cache": stats.Memory.TotalCache, "total_rss": stats.Memory.TotalRSS, "total_rss_huge": stats.Memory.TotalRSSHuge, "total_mapped_file": stats.Memory.TotalMappedFile, "total_dirty": stats.Memory.TotalDirty, "total_writeback": stats.Memory.TotalWriteback, "total_pgpgin": stats.Memory.TotalPgPgIn, "total_pgpgout": stats.Memory.TotalPgPgOut, "total_pgfault": stats.Memory.TotalPgFault, "total_pgmajfault": stats.Memory.TotalPgMajFault, "total_inactive_anon": stats.Memory.TotalInactiveAnon, "total_active_anon": stats.Memory.TotalActiveAnon, "total_inactive_file": stats.Memory.TotalInactiveFile, "total_active_file": stats.Memory.TotalActiveFile, "total_unevictable": stats.Memory.TotalUnevictable, } if 
stats.Memory.Usage != nil { s.MemoryStats = types.MemoryStats{ Stats: raw, Usage: stats.Memory.Usage.Usage, MaxUsage: stats.Memory.Usage.Max, Limit: stats.Memory.Usage.Limit, Failcnt: stats.Memory.Usage.Failcnt, } } else { s.MemoryStats = types.MemoryStats{ Stats: raw, } } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) { if stats.Io != nil { var isbr []types.BlkioStatEntry for _, re := range stats.Io.Usage { isbr = append(isbr, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "read", Value: re.Rbytes, }, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "write", Value: re.Wbytes, }, ) } s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: isbr, // Other fields are unsupported } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.UsageUsec * 1000, // PercpuUsage is not supported UsageInKernelmode: stats.CPU.SystemUsec * 1000, UsageInUsermode: stats.CPU.UserUsec * 1000, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.NrPeriods, ThrottledPeriods: stats.CPU.NrThrottled, ThrottledTime: stats.CPU.ThrottledUsec * 1000, }, } } if stats.Memory != nil { s.MemoryStats = types.MemoryStats{ // Stats is not compatible with v1 Stats: map[string]uint64{ "anon": stats.Memory.Anon, "file": stats.Memory.File, "kernel_stack": stats.Memory.KernelStack, "slab": stats.Memory.Slab, "sock": stats.Memory.Sock, "shmem": stats.Memory.Shmem, "file_mapped": stats.Memory.FileMapped, "file_dirty": stats.Memory.FileDirty, "file_writeback": stats.Memory.FileWriteback, "anon_thp": stats.Memory.AnonThp, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "slab_reclaimable": stats.Memory.SlabReclaimable, "slab_unreclaimable": stats.Memory.SlabUnreclaimable, "pgfault": stats.Memory.Pgfault, "pgmajfault": stats.Memory.Pgmajfault, "workingset_refault": stats.Memory.WorkingsetRefault, "workingset_activate": stats.Memory.WorkingsetActivate, "workingset_nodereclaim": stats.Memory.WorkingsetNodereclaim, "pgrefill": stats.Memory.Pgrefill, "pgscan": stats.Memory.Pgscan, "pgsteal": stats.Memory.Pgsteal, "pgactivate": stats.Memory.Pgactivate, "pgdeactivate": stats.Memory.Pgdeactivate, "pglazyfree": stats.Memory.Pglazyfree, "pglazyfreed": stats.Memory.Pglazyfreed, "thp_fault_alloc": stats.Memory.ThpFaultAlloc, "thp_collapse_alloc": stats.Memory.ThpCollapseAlloc, }, Usage: stats.Memory.Usage, // MaxUsage is not supported Limit: stats.Memory.UsageLimit, } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } if stats.MemoryEvents != nil { // Failcnt is set to the "oom" field of the "memory.events" file. 
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html s.MemoryStats.Failcnt = stats.MemoryEvents.Oom } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } // setDefaultIsolation determines the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { return nil } // setupDaemonProcess sets various settings for the daemon's process func setupDaemonProcess(config *config.Config) error { // setup the daemons oom_score_adj if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil { return err } if err := setMayDetachMounts(); err != nil { logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter") } return nil } // This is used to allow removal of mountpoints that may be mounted in other // namespaces on RHEL based kernels starting from RHEL 7.4. // Without this setting, removals on these RHEL based kernels may fail with // "device or resource busy". // This setting is not available in upstream kernels as it is not configurable, // but has been in the upstream kernels since 3.15. func setMayDetachMounts() error { f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0) if err != nil { if os.IsNotExist(err) { return nil } return errors.Wrap(err, "error opening may_detach_mounts kernel config file") } defer f.Close() _, err = f.WriteString("1") if os.IsPermission(err) { // Setting may_detach_mounts does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") } return nil } return err } func setupOOMScoreAdj(score int) error { if score == 0 { return nil } f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) if err != nil { return err } defer f.Close() stringScore := strconv.Itoa(score) _, err = f.WriteString(stringScore) if os.IsPermission(err) { // Setting oom_score_adj does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) } return nil } return err } func (daemon *Daemon) initCPURtController(mnt, path string) error { if path == "/" || path == "." { return nil } // Recursively create cgroup to ensure that the system and all parent cgroups have values set // for the period and runtime as this limits what the children can be set to. 
if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil { return err } path = filepath.Join(mnt, path) if err := os.MkdirAll(path, 0755); err != nil { return err } if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { return err } return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) } func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error { if configValue == 0 { return nil } return os.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700) } func (daemon *Daemon) setupSeccompProfile() error { switch profile := daemon.configStore.SeccompProfile; profile { case "", config.SeccompProfileDefault: daemon.seccompProfilePath = config.SeccompProfileDefault case config.SeccompProfileUnconfined: daemon.seccompProfilePath = config.SeccompProfileUnconfined default: daemon.seccompProfilePath = profile b, err := os.ReadFile(profile) if err != nil { return fmt.Errorf("opening seccomp profile (%s) failed: %v", profile, err) } daemon.seccompProfile = b } return nil } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { var siOpts []sysinfo.Opt if daemon.getCgroupDriver() == cgroupSystemdDriver { if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" { siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice")) } } return sysinfo.New(siOpts...) } func recursiveUnmount(target string) error { return mount.RecursiveUnmount(target) } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) return err }
sanchayanghosh
58e68789bf37b1484e773a3db99146ae36942295
40ccedd61b9d8b64fc75d64cd57b9ee35c769632
The current doc for this function now describes what it's fixing, which is more appropriate for the commit message. Perhaps change it to what it does: ``` // setHostGatewayIP sets HostGatewayIP to the default bridge's IP if it is empty. ```
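For illustration, a minimal sketch of what a helper carrying the suggested doc comment could look like, modelled on the inline HostGatewayIP block that appears in initNetworkController in the before_content further down. The receiver and exact signature are assumptions; the merged change may differ.

```go
// setHostGatewayIP sets HostGatewayIP to the default bridge's IP if it is empty.
// Sketch only: mirrors the inline block from initNetworkController.
func (daemon *Daemon) setHostGatewayIP(controller libnetwork.NetworkController) {
	if daemon.configStore.HostGatewayIP != nil || controller == nil {
		return
	}
	if n, err := controller.NetworkByName("bridge"); err == nil {
		v4Info, v6Info := n.Info().IpamInfo()
		var gateway net.IP
		if len(v4Info) > 0 {
			gateway = v4Info[0].Gateway.IP
		} else if len(v6Info) > 0 {
			gateway = v6Info[0].Gateway.IP
		}
		daemon.configStore.HostGatewayIP = gateway
	}
}
```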
thaJeztah
4,482
moby/moby
42,785
Fixed docker.internal.gateway not displaying properly on live restore
fixes https://github.com/moby/moby/issues/42753 …erly <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** I fixed issue #42753, where the docker.internal.host-gateway was not assigned to containers after a reboot when live-restore was enabled. This had to do with containers that ran after the docker daemon was restarted. **- How I did it** There was a function that assigned the host gateway IP addresses. I made sure that function is also called inside the conditional that runs when there are active sandboxes. **- How to verify it** #42753 details the steps **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> This pull request is a change to prevent the recurrence of #42753, by moving the HostGatewayIP assignment into a function and calling that once in the normal case and once when the sandboxes (containers) are running during daemon restart. The pull request is a WIP since I have yet to write a unit test. I would need your guidance for it. I am not sure how to call the API for the test, for running docker run --add-host=.... busybox in script form and getting the output. I know how to do it with fork/subprocess, but am not sure of the API approach to it. **- A picture of a cute animal (not mandatory but encouraged)** ![Not mine, but posting it here.](https://ichef.bbci.co.uk/news/976/cpsprodpb/3B4B/production/_109897151_otternew.jpg)
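To illustrate the approach described above, a rough sketch of the two call sites, assuming a helper like the setHostGatewayIP method sketched earlier. Only the control flow relevant to the fix is shown; the rest of initNetworkController (visible in the before_content below) is elided, and names are illustrative rather than the exact merged code.

```go
func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
	netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
	if err != nil {
		return nil, err
	}
	controller, err := libnetwork.New(netOptions...)
	if err != nil {
		return nil, fmt.Errorf("error obtaining controller instance: %v", err)
	}

	if len(activeSandboxes) > 0 {
		// Live-restore path: network setup is skipped because containers are still
		// running, so HostGatewayIP must also be populated here (the reported bug).
		daemon.setHostGatewayIP(controller)
		return controller, nil
	}

	// ... default "null", "host" and "bridge" network initialization elided ...

	// Normal path: populate HostGatewayIP once the default bridge network exists.
	daemon.setHostGatewayIP(controller)
	return controller, nil
}
```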
null
2021-08-25 21:06:53+00:00
2021-11-16 04:26:21+00:00
daemon/daemon_unix.go
//go:build linux || freebsd // +build linux freebsd package daemon // import "github.com/docker/docker/daemon" import ( "bufio" "context" "fmt" "net" "os" "path/filepath" "runtime" "runtime/debug" "strconv" "strings" "sync" "time" "github.com/containerd/cgroups" statsV1 "github.com/containerd/cgroups/stats/v1" statsV2 "github.com/containerd/cgroups/v2/stats" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/blkiodev" pblkiodev "github.com/docker/docker/api/types/blkiodev" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/drivers/bridge" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/options" lntypes "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" volumemounts "github.com/docker/docker/volume/mounts" "github.com/moby/sys/mount" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) const ( isWindows = false // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 linuxMinCPUShares = 2 linuxMaxCPUShares = 262144 platformSupported = true // It's not kernel limit, we want this 6M limit to account for overhead during startup, and to supply a reasonable functional container linuxMinMemory = 6291456 // constants for remapped root settings defaultIDSpecifier = "default" defaultRemappedID = "dockremap" // constant for cgroup drivers cgroupFsDriver = "cgroupfs" cgroupSystemdDriver = "systemd" cgroupNoneDriver = "none" ) type containerGetter interface { GetContainer(string) (*container.Container, error) } func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { memory := specs.LinuxMemory{} if config.Memory > 0 { memory.Limit = &config.Memory } if config.MemoryReservation > 0 { memory.Reservation = &config.MemoryReservation } if config.MemorySwap > 0 { memory.Swap = &config.MemorySwap } if config.MemorySwappiness != nil { swappiness := uint64(*config.MemorySwappiness) memory.Swappiness = &swappiness } if config.OomKillDisable != nil { memory.DisableOOMKiller = config.OomKillDisable } if config.KernelMemory != 0 { memory.Kernel = &config.KernelMemory } if config.KernelMemoryTCP != 0 { memory.KernelTCP = &config.KernelMemoryTCP } return &memory } func getPidsLimit(config containertypes.Resources) *specs.LinuxPids { if config.PidsLimit == nil { return nil } if *config.PidsLimit <= 0 { // docker API allows 0 and negative values to unset this to be consistent // with default values. When updating values, runc requires -1 to unset // the previous limit. 
return &specs.LinuxPids{Limit: -1} } return &specs.LinuxPids{Limit: *config.PidsLimit} } func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { cpu := specs.LinuxCPU{} if config.CPUShares < 0 { return nil, fmt.Errorf("shares: invalid argument") } if config.CPUShares >= 0 { shares := uint64(config.CPUShares) cpu.Shares = &shares } if config.CpusetCpus != "" { cpu.Cpus = config.CpusetCpus } if config.CpusetMems != "" { cpu.Mems = config.CpusetMems } if config.NanoCPUs > 0 { // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt period := uint64(100 * time.Millisecond / time.Microsecond) quota := config.NanoCPUs * int64(period) / 1e9 cpu.Period = &period cpu.Quota = &quota } if config.CPUPeriod != 0 { period := uint64(config.CPUPeriod) cpu.Period = &period } if config.CPUQuota != 0 { q := config.CPUQuota cpu.Quota = &q } if config.CPURealtimePeriod != 0 { period := uint64(config.CPURealtimePeriod) cpu.RealtimePeriod = &period } if config.CPURealtimeRuntime != 0 { c := config.CPURealtimeRuntime cpu.RealtimeRuntime = &c } return &cpu, nil } func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { var stat unix.Stat_t var blkioWeightDevices []specs.LinuxWeightDevice for _, weightDevice := range config.BlkioWeightDevice { if err := unix.Stat(weightDevice.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: weightDevice.Path, Err: err}) } weight := weightDevice.Weight d := specs.LinuxWeightDevice{Weight: &weight} // The type is 32bit on mips. d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert blkioWeightDevices = append(blkioWeightDevices, d) } return blkioWeightDevices, nil } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { container.NoNewPrivileges = daemon.configStore.NoNewPrivileges return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { var ( labelOpts []string err error ) for _, opt := range config.SecurityOpt { if opt == "no-new-privileges" { container.NoNewPrivileges = true continue } if opt == "disable" { labelOpts = append(labelOpts, "disable") continue } var con []string if strings.Contains(opt, "=") { con = strings.SplitN(opt, "=", 2) } else if strings.Contains(opt, ":") { con = strings.SplitN(opt, ":", 2) logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") } if len(con) != 2 { return fmt.Errorf("invalid --security-opt 1: %q", opt) } switch con[0] { case "label": labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] case "seccomp": container.SeccompProfile = con[1] case "no-new-privileges": noNewPrivileges, err := strconv.ParseBool(con[1]) if err != nil { return fmt.Errorf("invalid --security-opt 2: %q", opt) } container.NoNewPrivileges = noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } } container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { var throttleDevices []specs.LinuxThrottleDevice var stat unix.Stat_t for _, d := range devs { if err := unix.Stat(d.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: d.Path, 
Err: err}) } d := specs.LinuxThrottleDevice{Rate: d.Rate} // the type is 32bit on mips d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert throttleDevices = append(throttleDevices, d) } return throttleDevices, nil } // adjustParallelLimit takes a number of objects and a proposed limit and // figures out if it's reasonable (and adjusts it accordingly). This is only // used for daemon startup, which does a lot of parallel loading of containers // (and if we exceed RLIMIT_NOFILE then we're in trouble). func adjustParallelLimit(n int, limit int) int { // Rule-of-thumb overhead factor (how many files will each goroutine open // simultaneously). Yes, this is ugly but to be frank this whole thing is // ugly. const overhead = 2 // On Linux, we need to ensure that parallelStartupJobs doesn't cause us to // exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it // and give a warning (since in theory the user should increase their // ulimits to the largest possible value for dockerd). var rlim unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err) return limit } softRlimit := int(rlim.Cur) // Much fewer containers than RLIMIT_NOFILE. No need to adjust anything. if softRlimit > overhead*n { return limit } // RLIMIT_NOFILE big enough, no need to adjust anything. if softRlimit > overhead*limit { return limit } logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit) return softRlimit / overhead } func checkKernel() error { // Check for unsupported kernel versions // FIXME: it would be cleaner to not test for specific versions, but rather // test for specific functionalities. // Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.10 crashes are clearer. // For details see https://github.com/docker/docker/issues/407 // Docker 1.11 and above doesn't actually run on kernels older than 3.4, // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). if !kernel.CheckKernelVersion(3, 10, 0) { v, _ := kernel.GetKernelVersion() if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) } } return nil } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if adjustCPUShares && hostConfig.CPUShares > 0 { // Handle unsupported CPUShares if hostConfig.CPUShares < linuxMinCPUShares { logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) hostConfig.CPUShares = linuxMinCPUShares } else if hostConfig.CPUShares > linuxMaxCPUShares { logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) hostConfig.CPUShares = linuxMaxCPUShares } } if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { // By default, MemorySwap is set to twice the size of Memory. 
hostConfig.MemorySwap = hostConfig.Memory * 2 } if hostConfig.ShmSize == 0 { hostConfig.ShmSize = config.DefaultShmSize if daemon.configStore != nil { hostConfig.ShmSize = int64(daemon.configStore.ShmSize) } } // Set default IPC mode, if unset for container if hostConfig.IpcMode.IsEmpty() { m := config.DefaultIpcMode if daemon.configStore != nil { m = containertypes.IpcMode(daemon.configStore.IpcMode) } hostConfig.IpcMode = m } // Set default cgroup namespace mode, if unset for container if hostConfig.CgroupnsMode.IsEmpty() { // for cgroup v2: unshare cgroupns even for privileged containers // https://github.com/containers/libpod/pull/4374#issuecomment-549776387 if hostConfig.Privileged && cgroups.Mode() != cgroups.Unified { hostConfig.CgroupnsMode = containertypes.CgroupnsModeHost } else { m := containertypes.CgroupnsModeHost if cgroups.Mode() == cgroups.Unified { m = containertypes.CgroupnsModePrivate } if daemon.configStore != nil { m = containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode) } hostConfig.CgroupnsMode = m } } adaptSharedNamespaceContainer(daemon, hostConfig) var err error secOpts, err := daemon.generateSecurityOpt(hostConfig) if err != nil { return err } hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...) if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable } return nil } // adaptSharedNamespaceContainer replaces container name with its ID in hostConfig. // To be more precisely, it modifies `container:name` to `container:ID` of PidMode, IpcMode // and NetworkMode. // // When a container shares its namespace with another container, use ID can keep the namespace // sharing connection between the two containers even the another container is renamed. func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) { containerPrefix := "container:" if hostConfig.PidMode.IsContainer() { pidContainer := hostConfig.PidMode.Container() // if there is any error returned here, we just ignore it and leave it to be // handled in the following logic if c, err := daemon.GetContainer(pidContainer); err == nil { hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID) } } if hostConfig.IpcMode.IsContainer() { ipcContainer := hostConfig.IpcMode.Container() if c, err := daemon.GetContainer(ipcContainer); err == nil { hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID) } } if hostConfig.NetworkMode.IsContainer() { netContainer := hostConfig.NetworkMode.ConnectedContainer() if c, err := daemon.GetContainer(netContainer); err == nil { hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID) } } } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) { fixMemorySwappiness(resources) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory limit allowed is 6MB") } if resources.Memory > 0 && !sysInfo.MemoryLimit { warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") resources.Memory = 0 resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") } if resources.Memory == 0 && resources.MemorySwap > 0 && !update { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") } if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness if swappiness < 0 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) } } if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.MemoryReservation = 0 } if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory reservation allowed is 6MB") } if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") } if resources.KernelMemory > 0 { // Kernel memory limit is not supported on cgroup v2. // Even on cgroup v1, kernel memory limit (`kmem.limit_in_bytes`) has been deprecated since kernel 5.4. // https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7 warnings = append(warnings, "Specifying a kernel memory limit is deprecated and will be removed in a future release.") } if resources.KernelMemory > 0 && !sysInfo.KernelMemory { warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.KernelMemory = 0 } if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") } if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") } if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point // warning the caller if they already wanted the feature to be off if *resources.OomKillDisable { warnings = append(warnings, "Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") } resources.OomKillDisable = nil } if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 { warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.") } if resources.PidsLimit != nil && !sysInfo.PidsLimit { if *resources.PidsLimit > 0 { warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.") } resources.PidsLimit = nil } // cpu subsystem checks and adjustments if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") } if resources.NanoCPUs > 0 && !sysInfo.CPUCfs { return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU CFS scheduler or the cgroup is not mounted") } // The highest precision we could get on Linux is 0.001, by setting // cpu.cfs_period_us=1000ms // cpu.cfs_quota=1ms // See the following link for details: // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. // The error message is 0.01 so that this is consistent with Windows if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.CPUShares > 0 && !sysInfo.CPUShares { warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") resources.CPUShares = 0 } if (resources.CPUPeriod != 0 || resources.CPUQuota != 0) && !sysInfo.CPUCfs { warnings = append(warnings, "Your kernel does not support CPU CFS scheduler. CPU period/quota discarded.") resources.CPUPeriod = 0 resources.CPUQuota = 0 } if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") } if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") } if resources.CPUPercent > 0 { warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) resources.CPUPercent = 0 } // cpuset subsystem checks and adjustments if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. 
Cpuset discarded.") resources.CpusetCpus = "" resources.CpusetMems = "" } cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus) } if !cpusAvailable { return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) } memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems) } if !memsAvailable { return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) } // blkio subsystem checks and adjustments if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") resources.BlkioWeight = 0 } if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") } if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) } if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} } if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} } return warnings, nil } func (daemon *Daemon) getCgroupDriver() string { if UsingSystemd(daemon.configStore) { return cgroupSystemdDriver } if daemon.Rootless() { return cgroupNoneDriver } return cgroupFsDriver } // getCD gets the raw value of the native.cgroupdriver option, if set. 
func getCD(config *config.Config) string { for _, option := range config.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { continue } return val } return "" } // verifyCgroupDriver validates native.cgroupdriver func verifyCgroupDriver(config *config.Config) error { cd := getCD(config) if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { return nil } if cd == cgroupNoneDriver { return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd) } return fmt.Errorf("native.cgroupdriver option %s not supported", cd) } // UsingSystemd returns true if cli option includes native.cgroupdriver=systemd func UsingSystemd(config *config.Config) bool { cd := getCD(config) if cd == cgroupSystemdDriver { return true } // On cgroup v2 hosts, default to systemd driver if cd == "" && cgroups.Mode() == cgroups.Unified && isRunningSystemd() { return true } return false } var ( runningSystemd bool detectSystemd sync.Once ) // isRunningSystemd checks whether the host was booted with systemd as its init // system. This functions similarly to systemd's `sd_booted(3)`: internally, it // checks whether /run/systemd/system/ exists and is a directory. // http://www.freedesktop.org/software/systemd/man/sd_booted.html // // NOTE: This function comes from package github.com/coreos/go-systemd/util // It was borrowed here to avoid a dependency on cgo. func isRunningSystemd() bool { detectSystemd.Do(func() { fi, err := os.Lstat("/run/systemd/system") if err != nil { return } runningSystemd = fi.IsDir() }) return runningSystemd } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } sysInfo := daemon.RawSysInfo() w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update) // no matter err is nil or not, w could have data in itself. warnings = append(warnings, w...) if err != nil { return warnings, err } if hostConfig.ShmSize < 0 { return warnings, fmt.Errorf("SHM size can not be less than 0") } if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) } // ip-forwarding does not affect container with '--net=host' (or '--net=none') if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") } if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 { warnings = append(warnings, "Published ports are discarded when using host network mode") } // check for various conflicting options with user namespaces if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { if hostConfig.Privileged { return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. 
You must run the container in the host namespace when running privileged mode") } if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled") } if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled") } } if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { // CgroupParent for systemd cgroup should be named as "xxx.slice" if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if hostConfig.Runtime == "" { hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() } if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) } parser := volumemounts.NewParser() for dest := range hostConfig.Tmpfs { if err := parser.ValidateTmpfsMountDestination(dest); err != nil { return warnings, err } } if !hostConfig.CgroupnsMode.Valid() { return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode) } if hostConfig.CgroupnsMode.IsPrivate() { if !sysInfo.CgroupNamespaces { warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.") } } if hostConfig.Runtime == config.LinuxV1RuntimeName || (hostConfig.Runtime == "" && daemon.configStore.DefaultRuntime == config.LinuxV1RuntimeName) { warnings = append(warnings, fmt.Sprintf("Configured runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName)) } return warnings, nil } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(conf *config.Config) error { if conf.ContainerdNamespace == conf.ContainerdPluginNamespace { return errors.New("containers namespace and plugins namespace cannot be the same") } // Check for mutually incompatible config options if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") } if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") } if conf.BridgeConfig.EnableIP6Tables && !conf.Experimental { return fmt.Errorf("ip6tables rules are only available if experimental features are enabled") } if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { conf.BridgeConfig.EnableIPMasq = false } if err := verifyCgroupDriver(conf); err != nil { return err } if conf.CgroupParent != "" && UsingSystemd(conf) { if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified { return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode") } configureRuntimes(conf) if rtName := conf.GetDefaultRuntimeName(); rtName != "" { if conf.GetRuntime(rtName) == nil { return fmt.Errorf("specified default runtime '%s' does not exist", rtName) } if rtName == config.LinuxV1RuntimeName { logrus.Warnf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName) } } return nil } // checkSystem validates platform-specific requirements func checkSystem() error { return checkKernel() } // configureMaxThreads sets the Go runtime max threads threshold // which is 90% of the kernel setting from /proc/sys/kernel/threads-max func configureMaxThreads(config *config.Config) error { mt, err := os.ReadFile("/proc/sys/kernel/threads-max") if err != nil { return err } mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) if err != nil { return err } maxThreads := (mtint / 100) * 90 debug.SetMaxThreads(maxThreads) logrus.Debugf("Golang's threads limit set to %d", maxThreads) return nil } func overlaySupportsSelinux() (bool, error) { f, err := os.Open("/proc/kallsyms") if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), " security_inode_copy_up") { return true, nil } } return false, s.Err() } // configureKernelSecuritySupport configures and validates security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { if config.EnableSelinuxSupport { if !selinux.GetEnabled() { logrus.Warn("Docker could not enable SELinux on the host system") return nil } if driverName == "overlay" || driverName == "overlay2" { // If driver is overlay or overlay2, make sure kernel // supports selinux with overlay. supported, err := overlaySupportsSelinux() if err != nil { return err } if !supported { logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) } } } else { selinux.SetDisabled() } return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } if len(activeSandboxes) > 0 { logrus.Info("There are old running containers, the network config will not take affect") return controller, nil } // Initialize default network on "null" if n, _ := controller.NetworkByName("none"); n == nil { if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) } } // Initialize default network on "host" if n, _ := controller.NetworkByName("host"); n == nil { if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) } } // Clear stale bridge network if n, err := controller.NetworkByName("bridge"); err == nil { if err = n.Delete(); err != nil { return nil, fmt.Errorf("could not delete the default bridge network: %v", err) } if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled { removeDefaultBridgeInterface() } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } else { removeDefaultBridgeInterface() } // Set HostGatewayIP to the default bridge's IP if it is empty if daemon.configStore.HostGatewayIP == nil && controller != nil { if n, err := controller.NetworkByName("bridge"); err == nil { v4Info, v6Info := n.Info().IpamInfo() var gateway net.IP if len(v4Info) > 0 { gateway = v4Info[0].Gateway.IP } else if len(v6Info) > 0 { gateway = v6Info[0].Gateway.IP } daemon.configStore.HostGatewayIP = gateway } } return controller, nil } func driverOptions(config *config.Config) nwconfig.Option { return nwconfig.OptionDriverConfig("bridge", options.Generic{ netlabel.GenericData: options.Generic{ "EnableIPForwarding": config.BridgeConfig.EnableIPForward, "EnableIPTables": config.BridgeConfig.EnableIPTables, "EnableIP6Tables": config.BridgeConfig.EnableIP6Tables, "EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy, "UserlandProxyPath": config.BridgeConfig.UserlandProxyPath, }, }) } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { bridgeName := bridge.DefaultBridgeName if config.BridgeConfig.Iface != "" { bridgeName = config.BridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq), bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication), } // --ip processing if config.BridgeConfig.DefaultIP != nil { netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String() } ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) if err != nil { return errors.Wrap(err, "list bridge addresses failed") } nw := nwList[0] if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return errors.Wrap(err, "parse CIDR failed") } // Iterate through in case there are multiple addresses for the bridge for _, entry := range nwList { if fCIDR.Contains(entry.IP) { nw = entry break } } } ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() hip, _ := 
lntypes.GetHostPartIP(nw.IP, nw.Mask) if hip.IsGlobalUnicast() { ipamV4Conf.Gateway = nw.IP.String() } if config.BridgeConfig.IP != "" { ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP) if err != nil { return err } ipamV4Conf.PreferredPool = ipNet.String() ipamV4Conf.Gateway = ip.String() } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return err } ipamV4Conf.SubPool = fCIDR.String() } if config.BridgeConfig.DefaultGatewayIPv4 != nil { ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() } var ( deferIPv6Alloc bool ipamV6Conf *libnetwork.IpamConf ) if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" { return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6")) } else if config.BridgeConfig.FixedCIDRv6 != "" { _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) if err != nil { return err } // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has // at least 48 host bits, we need to guarantee the current behavior where the containers' // IPv6 addresses will be constructed based on the containers' interface MAC address. // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints // on this network until after the driver has created the endpoint and returned the // constructed address. Libnetwork will then reserve this address with the ipam driver. 
ones, _ := fCIDRv6.Mask.Size() deferIPv6Alloc = ones <= 80 ipamV6Conf = &libnetwork.IpamConf{ AuxAddresses: make(map[string]string), PreferredPool: fCIDRv6.String(), } // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 // address belongs to the same network, we need to inform libnetwork about it, so // that it can be reserved with IPAM and it will not be given away to somebody else for _, nw6 := range nw6List { if fCIDRv6.Contains(nw6.IP) { ipamV6Conf.Gateway = nw6.IP.String() break } } } if config.BridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} if ipamV6Conf != nil { v6Conf = append(v6Conf, ipamV6Conf) } // Initialize default network on "bridge" with the same name _, err = controller.NewNetwork("bridge", "bridge", "", libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), libnetwork.NetworkOptionDriverOpts(netOption), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) if err != nil { return fmt.Errorf("Error creating default \"bridge\" network: %v", err) } return nil } // Remove default bridge interface if present (--bridge=none use case) func removeDefaultBridgeInterface() { if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { if err := netlink.LinkDel(lnk); err != nil { logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) } } } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return func(initPath containerfs.ContainerFS) error { return initlayer.Setup(initPath, idMapping.RootPair()) } } // Parse the remapped root (user namespace) option, which can be one of: // username - valid username from /etc/passwd // username:groupname - valid username; valid groupname from /etc/group // uid - 32-bit unsigned int valid Linux UID value // uid:gid - uid value; 32-bit unsigned int Linux GID value // // If no groupname is specified, and a username is specified, an attempt // will be made to lookup a gid for that username as a groupname // // If names are used, they are verified to exist in passwd/group func parseRemappedRoot(usergrp string) (string, string, error) { var ( userID, groupID int username, groupname string ) idparts := strings.Split(usergrp, ":") if len(idparts) > 2 { return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) } if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { // must be a uid; take it as valid userID = int(uid) luser, err := idtools.LookupUID(userID) if err != nil { return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) } username = luser.Name if len(idparts) == 1 { // if the uid was numeric and no gid was specified, take the uid as the gid groupID = userID lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) } groupname = lgrp.Name } } else { lookupName := idparts[0] // special case: if the user specified "default", they want Docker to create or // use (after creation) the "dockremap" user/group for root remapping if lookupName == defaultIDSpecifier { lookupName = defaultRemappedID } luser, err := idtools.LookupUser(lookupName) if err != 
nil && idparts[0] != defaultIDSpecifier { // error if the name requested isn't the special "dockremap" ID return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) } else if err != nil { // special case-- if the username == "default", then we have been asked // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} // ranges will be used for the user and group mappings in user namespaced containers _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) if err == nil { return defaultRemappedID, defaultRemappedID, nil } return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) } username = luser.Name if len(idparts) == 1 { // we only have a string username, and no group specified; look up gid from username as group group, err := idtools.LookupGroup(lookupName) if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) } groupname = group.Name } } if len(idparts) == 2 { // groupname or gid is separately specified and must be resolved // to an unsigned 32-bit gid if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { // must be a gid, take it as valid groupID = int(gid) lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) } groupname = lgrp.Name } else { // not a number; attempt a lookup if _, err := idtools.LookupGroup(idparts[1]); err != nil { return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) } groupname = idparts[1] } } return username, groupname, nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { return nil, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { return nil, err } if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") return &idtools.IdentityMapping{}, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) mappings, err := idtools.NewIdentityMapping(username) if err != nil { return nil, errors.Wrap(err, "Can't create ID mappings") } return mappings, nil } return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools.Identity) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root // (e.g. mounted layers of a container) can traverse this path. // The user namespace support will create subdirectories for the remapped root host uid:gid // pair owned by that same uid:gid pair for proper write access to those needed metadata and // layer content subtrees. 
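// Illustrative example (values are assumptions, not from the original source):
// with --userns-remap=dockremap and an /etc/subuid range starting at 231072,
// the remapped-root branch below creates a per-mapping daemon root such as
// /var/lib/docker/231072.231072, mode 0710 and group-owned by the remapped
// gid so namespaced containers can traverse it, while the top-level root
// stays 0711 and root-owned.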
if _, err := os.Stat(rootDir); err == nil { // root current exists; verify the access bits are correct by setting them if err = os.Chmod(rootDir, 0711); err != nil { return err } } else if os.IsNotExist(err) { // no root exists yet, create it 0711 with root:root ownership if err := os.MkdirAll(rootDir, 0711); err != nil { return err } } id := idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: remappedRoot.GID} // First make sure the current root dir has the correct perms. if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root) } // if user namespaces are enabled we will create a subtree underneath the specified root // with any/all specified remapped root uid/gid options on the daemon creating // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } // we also need to verify that any pre-existing directories in the path to // the graphroot won't block access to remapped root--if any pre-existing directory // has strict permissions that don't allow "x", container start will fail, so // better to warn and fail now dirPath := config.Root for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if !idtools.CanAccess(dirPath, remappedRoot) { return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root) } } } if err := setupDaemonRootPropagation(config); err != nil { logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") } return nil } func setupDaemonRootPropagation(cfg *config.Config) error { rootParentMount, mountOptions, err := getSourceMount(cfg.Root) if err != nil { return errors.Wrap(err, "error getting daemon root's parent mount") } var cleanupOldFile bool cleanupFile := getUnmountOnShutdownPath(cfg) defer func() { if !cleanupOldFile { return } if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) { logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") } }() if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) { cleanupOldFile = true return nil } if err := mount.MakeShared(cfg.Root); err != nil { return errors.Wrap(err, "could not setup daemon root propagation to shared") } // check the case where this may have already been a mount to itself. // If so then the daemon only performed a remount and should not try to unmount this later. 
if rootParentMount == cfg.Root { cleanupOldFile = true return nil } if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil { return errors.Wrap(err, "error creating dir to store mount cleanup file") } if err := os.WriteFile(cleanupFile, nil, 0600); err != nil { return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown") } return nil } // getUnmountOnShutdownPath generates the path to used when writing the file that signals to the daemon that on shutdown // the daemon root should be unmounted. func getUnmountOnShutdownPath(config *config.Config) string { return filepath.Join(config.ExecRoot, "unmount-on-shutdown") } // registerLinks writes the links to a file. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { return nil } for _, l := range hostConfig.Links { name, alias, err := opts.ParseLink(l) if err != nil { return err } child, err := daemon.GetContainer(name) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "could not get container for %s", name) } for child.HostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) child, err = daemon.GetContainer(parts[1]) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "Could not get container for %s", parts[1]) } } if child.HostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } if err := daemon.registerLink(container, child, alias); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig _, err := container.WriteHostConfig() return err } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { return daemon.Unmount(container) } func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry { out := make([]types.BlkioStatEntry, len(entries)) for i, re := range entries { out[i] = types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: re.Op, Value: re.Value, } } return out } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } cs, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } s := &types.StatsJSON{} s.Read = cs.Read stats := cs.Metrics switch t := stats.(type) { case *statsV1.Metrics: return daemon.statsV1(s, t) case *statsV2.Metrics: return daemon.statsV2(s, t) default: return nil, errors.Errorf("unexpected type of metrics %+v", t) } } func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) { if stats.Blkio != nil { s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive), IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive), IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive), IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive), IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive), IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive), IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive), SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive), } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.Usage.Total, PercpuUsage: stats.CPU.Usage.PerCPU, UsageInKernelmode: stats.CPU.Usage.Kernel, UsageInUsermode: stats.CPU.Usage.User, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.Throttling.Periods, ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods, ThrottledTime: stats.CPU.Throttling.ThrottledTime, }, } } if stats.Memory != nil { raw := map[string]uint64{ "cache": stats.Memory.Cache, "rss": stats.Memory.RSS, "rss_huge": stats.Memory.RSSHuge, "mapped_file": stats.Memory.MappedFile, "dirty": stats.Memory.Dirty, "writeback": stats.Memory.Writeback, "pgpgin": stats.Memory.PgPgIn, "pgpgout": stats.Memory.PgPgOut, "pgfault": stats.Memory.PgFault, "pgmajfault": stats.Memory.PgMajFault, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "hierarchical_memory_limit": stats.Memory.HierarchicalMemoryLimit, "hierarchical_memsw_limit": stats.Memory.HierarchicalSwapLimit, "total_cache": stats.Memory.TotalCache, "total_rss": stats.Memory.TotalRSS, "total_rss_huge": stats.Memory.TotalRSSHuge, "total_mapped_file": stats.Memory.TotalMappedFile, "total_dirty": stats.Memory.TotalDirty, "total_writeback": stats.Memory.TotalWriteback, "total_pgpgin": stats.Memory.TotalPgPgIn, "total_pgpgout": stats.Memory.TotalPgPgOut, "total_pgfault": stats.Memory.TotalPgFault, "total_pgmajfault": stats.Memory.TotalPgMajFault, "total_inactive_anon": stats.Memory.TotalInactiveAnon, "total_active_anon": stats.Memory.TotalActiveAnon, "total_inactive_file": stats.Memory.TotalInactiveFile, "total_active_file": stats.Memory.TotalActiveFile, "total_unevictable": stats.Memory.TotalUnevictable, } if 
stats.Memory.Usage != nil { s.MemoryStats = types.MemoryStats{ Stats: raw, Usage: stats.Memory.Usage.Usage, MaxUsage: stats.Memory.Usage.Max, Limit: stats.Memory.Usage.Limit, Failcnt: stats.Memory.Usage.Failcnt, } } else { s.MemoryStats = types.MemoryStats{ Stats: raw, } } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) { if stats.Io != nil { var isbr []types.BlkioStatEntry for _, re := range stats.Io.Usage { isbr = append(isbr, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "read", Value: re.Rbytes, }, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "write", Value: re.Wbytes, }, ) } s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: isbr, // Other fields are unsupported } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.UsageUsec * 1000, // PercpuUsage is not supported UsageInKernelmode: stats.CPU.SystemUsec * 1000, UsageInUsermode: stats.CPU.UserUsec * 1000, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.NrPeriods, ThrottledPeriods: stats.CPU.NrThrottled, ThrottledTime: stats.CPU.ThrottledUsec * 1000, }, } } if stats.Memory != nil { s.MemoryStats = types.MemoryStats{ // Stats is not compatible with v1 Stats: map[string]uint64{ "anon": stats.Memory.Anon, "file": stats.Memory.File, "kernel_stack": stats.Memory.KernelStack, "slab": stats.Memory.Slab, "sock": stats.Memory.Sock, "shmem": stats.Memory.Shmem, "file_mapped": stats.Memory.FileMapped, "file_dirty": stats.Memory.FileDirty, "file_writeback": stats.Memory.FileWriteback, "anon_thp": stats.Memory.AnonThp, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "slab_reclaimable": stats.Memory.SlabReclaimable, "slab_unreclaimable": stats.Memory.SlabUnreclaimable, "pgfault": stats.Memory.Pgfault, "pgmajfault": stats.Memory.Pgmajfault, "workingset_refault": stats.Memory.WorkingsetRefault, "workingset_activate": stats.Memory.WorkingsetActivate, "workingset_nodereclaim": stats.Memory.WorkingsetNodereclaim, "pgrefill": stats.Memory.Pgrefill, "pgscan": stats.Memory.Pgscan, "pgsteal": stats.Memory.Pgsteal, "pgactivate": stats.Memory.Pgactivate, "pgdeactivate": stats.Memory.Pgdeactivate, "pglazyfree": stats.Memory.Pglazyfree, "pglazyfreed": stats.Memory.Pglazyfreed, "thp_fault_alloc": stats.Memory.ThpFaultAlloc, "thp_collapse_alloc": stats.Memory.ThpCollapseAlloc, }, Usage: stats.Memory.Usage, // MaxUsage is not supported Limit: stats.Memory.UsageLimit, } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } if stats.MemoryEvents != nil { // Failcnt is set to the "oom" field of the "memory.events" file. 
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html s.MemoryStats.Failcnt = stats.MemoryEvents.Oom } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } // setDefaultIsolation determines the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { return nil } // setupDaemonProcess sets various settings for the daemon's process func setupDaemonProcess(config *config.Config) error { // setup the daemons oom_score_adj if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil { return err } if err := setMayDetachMounts(); err != nil { logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter") } return nil } // This is used to allow removal of mountpoints that may be mounted in other // namespaces on RHEL based kernels starting from RHEL 7.4. // Without this setting, removals on these RHEL based kernels may fail with // "device or resource busy". // This setting is not available in upstream kernels as it is not configurable, // but has been in the upstream kernels since 3.15. func setMayDetachMounts() error { f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0) if err != nil { if os.IsNotExist(err) { return nil } return errors.Wrap(err, "error opening may_detach_mounts kernel config file") } defer f.Close() _, err = f.WriteString("1") if os.IsPermission(err) { // Setting may_detach_mounts does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") } return nil } return err } func setupOOMScoreAdj(score int) error { if score == 0 { return nil } f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) if err != nil { return err } defer f.Close() stringScore := strconv.Itoa(score) _, err = f.WriteString(stringScore) if os.IsPermission(err) { // Setting oom_score_adj does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) } return nil } return err } func (daemon *Daemon) initCPURtController(mnt, path string) error { if path == "/" || path == "." { return nil } // Recursively create cgroup to ensure that the system and all parent cgroups have values set // for the period and runtime as this limits what the children can be set to. 
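// Illustrative walk-through (assumed values): for mnt=/sys/fs/cgroup/cpu and
// path=docker/abc, the recursion below first handles "docker", then
// "docker/abc", creating each directory and writing the configured
// --cpu-rt-period / --cpu-rt-runtime values into cpu.rt_period_us and
// cpu.rt_runtime_us at every level, since a child cgroup cannot exceed its
// parent's realtime budget.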
if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil { return err } path = filepath.Join(mnt, path) if err := os.MkdirAll(path, 0755); err != nil { return err } if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { return err } return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) } func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error { if configValue == 0 { return nil } return os.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700) } func (daemon *Daemon) setupSeccompProfile() error { switch profile := daemon.configStore.SeccompProfile; profile { case "", config.SeccompProfileDefault: daemon.seccompProfilePath = config.SeccompProfileDefault case config.SeccompProfileUnconfined: daemon.seccompProfilePath = config.SeccompProfileUnconfined default: daemon.seccompProfilePath = profile b, err := os.ReadFile(profile) if err != nil { return fmt.Errorf("opening seccomp profile (%s) failed: %v", profile, err) } daemon.seccompProfile = b } return nil } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { var siOpts []sysinfo.Opt if daemon.getCgroupDriver() == cgroupSystemdDriver { if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" { siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice")) } } return sysinfo.New(siOpts...) } func recursiveUnmount(target string) error { return mount.RecursiveUnmount(target) } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) return err }
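// The rootless cgroup v2 handling in RawSysInfo above derives the user slice
// from ROOTLESSKIT_PARENT_EUID. A minimal sketch of that mapping (hypothetical
// helper, not part of the original source; it only mirrors the string
// construction used above):
func exampleRootlessCgroup2Group(parentEUID string) string {
	// e.g. ROOTLESSKIT_PARENT_EUID=1000 -> "/user.slice/user-1000.slice"
	return "/user.slice/user-" + parentEUID + ".slice"
}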
//go:build linux || freebsd // +build linux freebsd package daemon // import "github.com/docker/docker/daemon" import ( "bufio" "context" "fmt" "net" "os" "path/filepath" "runtime" "runtime/debug" "strconv" "strings" "sync" "time" "github.com/containerd/cgroups" statsV1 "github.com/containerd/cgroups/stats/v1" statsV2 "github.com/containerd/cgroups/v2/stats" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/blkiodev" pblkiodev "github.com/docker/docker/api/types/blkiodev" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/drivers/bridge" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/options" lntypes "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" volumemounts "github.com/docker/docker/volume/mounts" "github.com/moby/sys/mount" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) const ( isWindows = false // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 linuxMinCPUShares = 2 linuxMaxCPUShares = 262144 platformSupported = true // It's not kernel limit, we want this 6M limit to account for overhead during startup, and to supply a reasonable functional container linuxMinMemory = 6291456 // constants for remapped root settings defaultIDSpecifier = "default" defaultRemappedID = "dockremap" // constant for cgroup drivers cgroupFsDriver = "cgroupfs" cgroupSystemdDriver = "systemd" cgroupNoneDriver = "none" ) type containerGetter interface { GetContainer(string) (*container.Container, error) } func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { memory := specs.LinuxMemory{} if config.Memory > 0 { memory.Limit = &config.Memory } if config.MemoryReservation > 0 { memory.Reservation = &config.MemoryReservation } if config.MemorySwap > 0 { memory.Swap = &config.MemorySwap } if config.MemorySwappiness != nil { swappiness := uint64(*config.MemorySwappiness) memory.Swappiness = &swappiness } if config.OomKillDisable != nil { memory.DisableOOMKiller = config.OomKillDisable } if config.KernelMemory != 0 { memory.Kernel = &config.KernelMemory } if config.KernelMemoryTCP != 0 { memory.KernelTCP = &config.KernelMemoryTCP } return &memory } func getPidsLimit(config containertypes.Resources) *specs.LinuxPids { if config.PidsLimit == nil { return nil } if *config.PidsLimit <= 0 { // docker API allows 0 and negative values to unset this to be consistent // with default values. When updating values, runc requires -1 to unset // the previous limit. 
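// For example (assumed API input): an update request with PidsLimit=0 or a
// negative value falls through to the -1 below, telling runc to clear any
// previously configured pids limit rather than setting a limit of 0.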
return &specs.LinuxPids{Limit: -1} } return &specs.LinuxPids{Limit: *config.PidsLimit} } func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { cpu := specs.LinuxCPU{} if config.CPUShares < 0 { return nil, fmt.Errorf("shares: invalid argument") } if config.CPUShares >= 0 { shares := uint64(config.CPUShares) cpu.Shares = &shares } if config.CpusetCpus != "" { cpu.Cpus = config.CpusetCpus } if config.CpusetMems != "" { cpu.Mems = config.CpusetMems } if config.NanoCPUs > 0 { // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt period := uint64(100 * time.Millisecond / time.Microsecond) quota := config.NanoCPUs * int64(period) / 1e9 cpu.Period = &period cpu.Quota = &quota } if config.CPUPeriod != 0 { period := uint64(config.CPUPeriod) cpu.Period = &period } if config.CPUQuota != 0 { q := config.CPUQuota cpu.Quota = &q } if config.CPURealtimePeriod != 0 { period := uint64(config.CPURealtimePeriod) cpu.RealtimePeriod = &period } if config.CPURealtimeRuntime != 0 { c := config.CPURealtimeRuntime cpu.RealtimeRuntime = &c } return &cpu, nil } func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { var stat unix.Stat_t var blkioWeightDevices []specs.LinuxWeightDevice for _, weightDevice := range config.BlkioWeightDevice { if err := unix.Stat(weightDevice.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: weightDevice.Path, Err: err}) } weight := weightDevice.Weight d := specs.LinuxWeightDevice{Weight: &weight} // The type is 32bit on mips. d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert blkioWeightDevices = append(blkioWeightDevices, d) } return blkioWeightDevices, nil } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { container.NoNewPrivileges = daemon.configStore.NoNewPrivileges return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { var ( labelOpts []string err error ) for _, opt := range config.SecurityOpt { if opt == "no-new-privileges" { container.NoNewPrivileges = true continue } if opt == "disable" { labelOpts = append(labelOpts, "disable") continue } var con []string if strings.Contains(opt, "=") { con = strings.SplitN(opt, "=", 2) } else if strings.Contains(opt, ":") { con = strings.SplitN(opt, ":", 2) logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") } if len(con) != 2 { return fmt.Errorf("invalid --security-opt 1: %q", opt) } switch con[0] { case "label": labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] case "seccomp": container.SeccompProfile = con[1] case "no-new-privileges": noNewPrivileges, err := strconv.ParseBool(con[1]) if err != nil { return fmt.Errorf("invalid --security-opt 2: %q", opt) } container.NoNewPrivileges = noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } } container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { var throttleDevices []specs.LinuxThrottleDevice var stat unix.Stat_t for _, d := range devs { if err := unix.Stat(d.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: d.Path, 
Err: err}) } d := specs.LinuxThrottleDevice{Rate: d.Rate} // the type is 32bit on mips d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert throttleDevices = append(throttleDevices, d) } return throttleDevices, nil } // adjustParallelLimit takes a number of objects and a proposed limit and // figures out if it's reasonable (and adjusts it accordingly). This is only // used for daemon startup, which does a lot of parallel loading of containers // (and if we exceed RLIMIT_NOFILE then we're in trouble). func adjustParallelLimit(n int, limit int) int { // Rule-of-thumb overhead factor (how many files will each goroutine open // simultaneously). Yes, this is ugly but to be frank this whole thing is // ugly. const overhead = 2 // On Linux, we need to ensure that parallelStartupJobs doesn't cause us to // exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it // and give a warning (since in theory the user should increase their // ulimits to the largest possible value for dockerd). var rlim unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err) return limit } softRlimit := int(rlim.Cur) // Much fewer containers than RLIMIT_NOFILE. No need to adjust anything. if softRlimit > overhead*n { return limit } // RLIMIT_NOFILE big enough, no need to adjust anything. if softRlimit > overhead*limit { return limit } logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit) return softRlimit / overhead } func checkKernel() error { // Check for unsupported kernel versions // FIXME: it would be cleaner to not test for specific versions, but rather // test for specific functionalities. // Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.10 crashes are clearer. // For details see https://github.com/docker/docker/issues/407 // Docker 1.11 and above doesn't actually run on kernels older than 3.4, // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). if !kernel.CheckKernelVersion(3, 10, 0) { v, _ := kernel.GetKernelVersion() if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) } } return nil } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if adjustCPUShares && hostConfig.CPUShares > 0 { // Handle unsupported CPUShares if hostConfig.CPUShares < linuxMinCPUShares { logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) hostConfig.CPUShares = linuxMinCPUShares } else if hostConfig.CPUShares > linuxMaxCPUShares { logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) hostConfig.CPUShares = linuxMaxCPUShares } } if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { // By default, MemorySwap is set to twice the size of Memory. 
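// Illustrative example (assumed flag values): running with -m 512m and no
// --memory-swap gives MemorySwap = 1024m here, i.e. the container may use
// 512m of memory plus 512m of swap.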
hostConfig.MemorySwap = hostConfig.Memory * 2 } if hostConfig.ShmSize == 0 { hostConfig.ShmSize = config.DefaultShmSize if daemon.configStore != nil { hostConfig.ShmSize = int64(daemon.configStore.ShmSize) } } // Set default IPC mode, if unset for container if hostConfig.IpcMode.IsEmpty() { m := config.DefaultIpcMode if daemon.configStore != nil { m = containertypes.IpcMode(daemon.configStore.IpcMode) } hostConfig.IpcMode = m } // Set default cgroup namespace mode, if unset for container if hostConfig.CgroupnsMode.IsEmpty() { // for cgroup v2: unshare cgroupns even for privileged containers // https://github.com/containers/libpod/pull/4374#issuecomment-549776387 if hostConfig.Privileged && cgroups.Mode() != cgroups.Unified { hostConfig.CgroupnsMode = containertypes.CgroupnsModeHost } else { m := containertypes.CgroupnsModeHost if cgroups.Mode() == cgroups.Unified { m = containertypes.CgroupnsModePrivate } if daemon.configStore != nil { m = containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode) } hostConfig.CgroupnsMode = m } } adaptSharedNamespaceContainer(daemon, hostConfig) var err error secOpts, err := daemon.generateSecurityOpt(hostConfig) if err != nil { return err } hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...) if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable } return nil } // adaptSharedNamespaceContainer replaces container name with its ID in hostConfig. // To be more precisely, it modifies `container:name` to `container:ID` of PidMode, IpcMode // and NetworkMode. // // When a container shares its namespace with another container, use ID can keep the namespace // sharing connection between the two containers even the another container is renamed. func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) { containerPrefix := "container:" if hostConfig.PidMode.IsContainer() { pidContainer := hostConfig.PidMode.Container() // if there is any error returned here, we just ignore it and leave it to be // handled in the following logic if c, err := daemon.GetContainer(pidContainer); err == nil { hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID) } } if hostConfig.IpcMode.IsContainer() { ipcContainer := hostConfig.IpcMode.Container() if c, err := daemon.GetContainer(ipcContainer); err == nil { hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID) } } if hostConfig.NetworkMode.IsContainer() { netContainer := hostConfig.NetworkMode.ConnectedContainer() if c, err := daemon.GetContainer(netContainer); err == nil { hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID) } } } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) { fixMemorySwappiness(resources) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory limit allowed is 6MB") } if resources.Memory > 0 && !sysInfo.MemoryLimit { warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") resources.Memory = 0 resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") } if resources.Memory == 0 && resources.MemorySwap > 0 && !update { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") } if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness if swappiness < 0 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) } } if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.MemoryReservation = 0 } if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory reservation allowed is 6MB") } if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") } if resources.KernelMemory > 0 { // Kernel memory limit is not supported on cgroup v2. // Even on cgroup v1, kernel memory limit (`kmem.limit_in_bytes`) has been deprecated since kernel 5.4. // https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7 warnings = append(warnings, "Specifying a kernel memory limit is deprecated and will be removed in a future release.") } if resources.KernelMemory > 0 && !sysInfo.KernelMemory { warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.KernelMemory = 0 } if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") } if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") } if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point // warning the caller if they already wanted the feature to be off if *resources.OomKillDisable { warnings = append(warnings, "Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") } resources.OomKillDisable = nil } if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 { warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.") } if resources.PidsLimit != nil && !sysInfo.PidsLimit { if *resources.PidsLimit > 0 { warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.") } resources.PidsLimit = nil } // cpu subsystem checks and adjustments if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") } if resources.NanoCPUs > 0 && !sysInfo.CPUCfs { return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU CFS scheduler or the cgroup is not mounted") } // The highest precision we could get on Linux is 0.001, by setting // cpu.cfs_period_us=1000ms // cpu.cfs_quota=1ms // See the following link for details: // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. // The error message is 0.01 so that this is consistent with Windows if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.CPUShares > 0 && !sysInfo.CPUShares { warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") resources.CPUShares = 0 } if (resources.CPUPeriod != 0 || resources.CPUQuota != 0) && !sysInfo.CPUCfs { warnings = append(warnings, "Your kernel does not support CPU CFS scheduler. CPU period/quota discarded.") resources.CPUPeriod = 0 resources.CPUQuota = 0 } if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") } if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") } if resources.CPUPercent > 0 { warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) resources.CPUPercent = 0 } // cpuset subsystem checks and adjustments if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. 
Cpuset discarded.") resources.CpusetCpus = "" resources.CpusetMems = "" } cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus) } if !cpusAvailable { return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) } memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems) } if !memsAvailable { return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) } // blkio subsystem checks and adjustments if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") resources.BlkioWeight = 0 } if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") } if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) } if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} } if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} } return warnings, nil } func (daemon *Daemon) getCgroupDriver() string { if UsingSystemd(daemon.configStore) { return cgroupSystemdDriver } if daemon.Rootless() { return cgroupNoneDriver } return cgroupFsDriver } // getCD gets the raw value of the native.cgroupdriver option, if set. 
func getCD(config *config.Config) string { for _, option := range config.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { continue } return val } return "" } // verifyCgroupDriver validates native.cgroupdriver func verifyCgroupDriver(config *config.Config) error { cd := getCD(config) if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { return nil } if cd == cgroupNoneDriver { return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd) } return fmt.Errorf("native.cgroupdriver option %s not supported", cd) } // UsingSystemd returns true if cli option includes native.cgroupdriver=systemd func UsingSystemd(config *config.Config) bool { cd := getCD(config) if cd == cgroupSystemdDriver { return true } // On cgroup v2 hosts, default to systemd driver if cd == "" && cgroups.Mode() == cgroups.Unified && isRunningSystemd() { return true } return false } var ( runningSystemd bool detectSystemd sync.Once ) // isRunningSystemd checks whether the host was booted with systemd as its init // system. This functions similarly to systemd's `sd_booted(3)`: internally, it // checks whether /run/systemd/system/ exists and is a directory. // http://www.freedesktop.org/software/systemd/man/sd_booted.html // // NOTE: This function comes from package github.com/coreos/go-systemd/util // It was borrowed here to avoid a dependency on cgo. func isRunningSystemd() bool { detectSystemd.Do(func() { fi, err := os.Lstat("/run/systemd/system") if err != nil { return } runningSystemd = fi.IsDir() }) return runningSystemd } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } sysInfo := daemon.RawSysInfo() w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update) // no matter err is nil or not, w could have data in itself. warnings = append(warnings, w...) if err != nil { return warnings, err } if hostConfig.ShmSize < 0 { return warnings, fmt.Errorf("SHM size can not be less than 0") } if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) } // ip-forwarding does not affect container with '--net=host' (or '--net=none') if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") } if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 { warnings = append(warnings, "Published ports are discarded when using host network mode") } // check for various conflicting options with user namespaces if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { if hostConfig.Privileged { return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. 
You must run the container in the host namespace when running privileged mode") } if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled") } if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled") } } if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { // CgroupParent for systemd cgroup should be named as "xxx.slice" if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if hostConfig.Runtime == "" { hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() } if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) } parser := volumemounts.NewParser() for dest := range hostConfig.Tmpfs { if err := parser.ValidateTmpfsMountDestination(dest); err != nil { return warnings, err } } if !hostConfig.CgroupnsMode.Valid() { return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode) } if hostConfig.CgroupnsMode.IsPrivate() { if !sysInfo.CgroupNamespaces { warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.") } } if hostConfig.Runtime == config.LinuxV1RuntimeName || (hostConfig.Runtime == "" && daemon.configStore.DefaultRuntime == config.LinuxV1RuntimeName) { warnings = append(warnings, fmt.Sprintf("Configured runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName)) } return warnings, nil } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(conf *config.Config) error { if conf.ContainerdNamespace == conf.ContainerdPluginNamespace { return errors.New("containers namespace and plugins namespace cannot be the same") } // Check for mutually incompatible config options if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") } if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") } if conf.BridgeConfig.EnableIP6Tables && !conf.Experimental { return fmt.Errorf("ip6tables rules are only available if experimental features are enabled") } if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { conf.BridgeConfig.EnableIPMasq = false } if err := verifyCgroupDriver(conf); err != nil { return err } if conf.CgroupParent != "" && UsingSystemd(conf) { if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified { return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode") } configureRuntimes(conf) if rtName := conf.GetDefaultRuntimeName(); rtName != "" { if conf.GetRuntime(rtName) == nil { return fmt.Errorf("specified default runtime '%s' does not exist", rtName) } if rtName == config.LinuxV1RuntimeName { logrus.Warnf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName) } } return nil } // checkSystem validates platform-specific requirements func checkSystem() error { return checkKernel() } // configureMaxThreads sets the Go runtime max threads threshold // which is 90% of the kernel setting from /proc/sys/kernel/threads-max func configureMaxThreads(config *config.Config) error { mt, err := os.ReadFile("/proc/sys/kernel/threads-max") if err != nil { return err } mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) if err != nil { return err } maxThreads := (mtint / 100) * 90 debug.SetMaxThreads(maxThreads) logrus.Debugf("Golang's threads limit set to %d", maxThreads) return nil } func overlaySupportsSelinux() (bool, error) { f, err := os.Open("/proc/kallsyms") if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), " security_inode_copy_up") { return true, nil } } return false, s.Err() } // configureKernelSecuritySupport configures and validates security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { if config.EnableSelinuxSupport { if !selinux.GetEnabled() { logrus.Warn("Docker could not enable SELinux on the host system") return nil } if driverName == "overlay" || driverName == "overlay2" { // If driver is overlay or overlay2, make sure kernel // supports selinux with overlay. supported, err := overlaySupportsSelinux() if err != nil { return err } if !supported { logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) } } } else { selinux.SetDisabled() } return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } if len(activeSandboxes) > 0 { logrus.Info("There are old running containers, the network config will not take affect") setHostGatewayIP(daemon.configStore, controller) return controller, nil } // Initialize default network on "null" if n, _ := controller.NetworkByName("none"); n == nil { if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) } } // Initialize default network on "host" if n, _ := controller.NetworkByName("host"); n == nil { if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) } } // Clear stale bridge network if n, err := controller.NetworkByName("bridge"); err == nil { if err = n.Delete(); err != nil { return nil, fmt.Errorf("could not delete the default bridge network: %v", err) } if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled { removeDefaultBridgeInterface() } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } else { removeDefaultBridgeInterface() } // Set HostGatewayIP to the default bridge's IP if it is empty setHostGatewayIP(daemon.configStore, controller) return controller, nil } // setHostGatewayIP sets cfg.HostGatewayIP to the default bridge's IP if it is empty. func setHostGatewayIP(config *config.Config, controller libnetwork.NetworkController) { if config.HostGatewayIP != nil { return } if n, err := controller.NetworkByName("bridge"); err == nil { v4Info, v6Info := n.Info().IpamInfo() var gateway net.IP if len(v4Info) > 0 { gateway = v4Info[0].Gateway.IP } else if len(v6Info) > 0 { gateway = v6Info[0].Gateway.IP } config.HostGatewayIP = gateway } } func driverOptions(config *config.Config) nwconfig.Option { return nwconfig.OptionDriverConfig("bridge", options.Generic{ netlabel.GenericData: options.Generic{ "EnableIPForwarding": config.BridgeConfig.EnableIPForward, "EnableIPTables": config.BridgeConfig.EnableIPTables, "EnableIP6Tables": config.BridgeConfig.EnableIP6Tables, "EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy, "UserlandProxyPath": config.BridgeConfig.UserlandProxyPath, }, }) } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { bridgeName := bridge.DefaultBridgeName if config.BridgeConfig.Iface != "" { bridgeName = config.BridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq), bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication), } // --ip processing if config.BridgeConfig.DefaultIP != nil { netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String() } ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) if err != nil { return errors.Wrap(err, "list bridge addresses failed") } nw := nwList[0] if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return errors.Wrap(err, "parse CIDR 
failed") } // Iterate through in case there are multiple addresses for the bridge for _, entry := range nwList { if fCIDR.Contains(entry.IP) { nw = entry break } } } ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) if hip.IsGlobalUnicast() { ipamV4Conf.Gateway = nw.IP.String() } if config.BridgeConfig.IP != "" { ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP) if err != nil { return err } ipamV4Conf.PreferredPool = ipNet.String() ipamV4Conf.Gateway = ip.String() } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return err } ipamV4Conf.SubPool = fCIDR.String() } if config.BridgeConfig.DefaultGatewayIPv4 != nil { ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() } var ( deferIPv6Alloc bool ipamV6Conf *libnetwork.IpamConf ) if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" { return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6")) } else if config.BridgeConfig.FixedCIDRv6 != "" { _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) if err != nil { return err } // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has // at least 48 host bits, we need to guarantee the current behavior where the containers' // IPv6 addresses will be constructed based on the containers' interface MAC address. // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints // on this network until after the driver has created the endpoint and returned the // constructed address. Libnetwork will then reserve this address with the ipam driver. 
ones, _ := fCIDRv6.Mask.Size() deferIPv6Alloc = ones <= 80 ipamV6Conf = &libnetwork.IpamConf{ AuxAddresses: make(map[string]string), PreferredPool: fCIDRv6.String(), } // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 // address belongs to the same network, we need to inform libnetwork about it, so // that it can be reserved with IPAM and it will not be given away to somebody else for _, nw6 := range nw6List { if fCIDRv6.Contains(nw6.IP) { ipamV6Conf.Gateway = nw6.IP.String() break } } } if config.BridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} if ipamV6Conf != nil { v6Conf = append(v6Conf, ipamV6Conf) } // Initialize default network on "bridge" with the same name _, err = controller.NewNetwork("bridge", "bridge", "", libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), libnetwork.NetworkOptionDriverOpts(netOption), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) if err != nil { return fmt.Errorf("Error creating default \"bridge\" network: %v", err) } return nil } // Remove default bridge interface if present (--bridge=none use case) func removeDefaultBridgeInterface() { if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { if err := netlink.LinkDel(lnk); err != nil { logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) } } } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return func(initPath containerfs.ContainerFS) error { return initlayer.Setup(initPath, idMapping.RootPair()) } } // Parse the remapped root (user namespace) option, which can be one of: // username - valid username from /etc/passwd // username:groupname - valid username; valid groupname from /etc/group // uid - 32-bit unsigned int valid Linux UID value // uid:gid - uid value; 32-bit unsigned int Linux GID value // // If no groupname is specified, and a username is specified, an attempt // will be made to lookup a gid for that username as a groupname // // If names are used, they are verified to exist in passwd/group func parseRemappedRoot(usergrp string) (string, string, error) { var ( userID, groupID int username, groupname string ) idparts := strings.Split(usergrp, ":") if len(idparts) > 2 { return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) } if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { // must be a uid; take it as valid userID = int(uid) luser, err := idtools.LookupUID(userID) if err != nil { return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) } username = luser.Name if len(idparts) == 1 { // if the uid was numeric and no gid was specified, take the uid as the gid groupID = userID lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) } groupname = lgrp.Name } } else { lookupName := idparts[0] // special case: if the user specified "default", they want Docker to create or // use (after creation) the "dockremap" user/group for root remapping if lookupName == defaultIDSpecifier { lookupName = defaultRemappedID } luser, err := idtools.LookupUser(lookupName) if err != 
nil && idparts[0] != defaultIDSpecifier { // error if the name requested isn't the special "dockremap" ID return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) } else if err != nil { // special case-- if the username == "default", then we have been asked // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} // ranges will be used for the user and group mappings in user namespaced containers _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) if err == nil { return defaultRemappedID, defaultRemappedID, nil } return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) } username = luser.Name if len(idparts) == 1 { // we only have a string username, and no group specified; look up gid from username as group group, err := idtools.LookupGroup(lookupName) if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) } groupname = group.Name } } if len(idparts) == 2 { // groupname or gid is separately specified and must be resolved // to an unsigned 32-bit gid if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { // must be a gid, take it as valid groupID = int(gid) lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) } groupname = lgrp.Name } else { // not a number; attempt a lookup if _, err := idtools.LookupGroup(idparts[1]); err != nil { return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) } groupname = idparts[1] } } return username, groupname, nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { return nil, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { return nil, err } if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") return &idtools.IdentityMapping{}, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) mappings, err := idtools.NewIdentityMapping(username) if err != nil { return nil, errors.Wrap(err, "Can't create ID mappings") } return mappings, nil } return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools.Identity) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root // (e.g. mounted layers of a container) can traverse this path. // The user namespace support will create subdirectories for the remapped root host uid:gid // pair owned by that same uid:gid pair for proper write access to those needed metadata and // layer content subtrees. 
if _, err := os.Stat(rootDir); err == nil { // root current exists; verify the access bits are correct by setting them if err = os.Chmod(rootDir, 0711); err != nil { return err } } else if os.IsNotExist(err) { // no root exists yet, create it 0711 with root:root ownership if err := os.MkdirAll(rootDir, 0711); err != nil { return err } } id := idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: remappedRoot.GID} // First make sure the current root dir has the correct perms. if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root) } // if user namespaces are enabled we will create a subtree underneath the specified root // with any/all specified remapped root uid/gid options on the daemon creating // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } // we also need to verify that any pre-existing directories in the path to // the graphroot won't block access to remapped root--if any pre-existing directory // has strict permissions that don't allow "x", container start will fail, so // better to warn and fail now dirPath := config.Root for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if !idtools.CanAccess(dirPath, remappedRoot) { return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root) } } } if err := setupDaemonRootPropagation(config); err != nil { logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") } return nil } func setupDaemonRootPropagation(cfg *config.Config) error { rootParentMount, mountOptions, err := getSourceMount(cfg.Root) if err != nil { return errors.Wrap(err, "error getting daemon root's parent mount") } var cleanupOldFile bool cleanupFile := getUnmountOnShutdownPath(cfg) defer func() { if !cleanupOldFile { return } if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) { logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") } }() if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) { cleanupOldFile = true return nil } if err := mount.MakeShared(cfg.Root); err != nil { return errors.Wrap(err, "could not setup daemon root propagation to shared") } // check the case where this may have already been a mount to itself. // If so then the daemon only performed a remount and should not try to unmount this later. 
if rootParentMount == cfg.Root { cleanupOldFile = true return nil } if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil { return errors.Wrap(err, "error creating dir to store mount cleanup file") } if err := os.WriteFile(cleanupFile, nil, 0600); err != nil { return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown") } return nil } // getUnmountOnShutdownPath generates the path to used when writing the file that signals to the daemon that on shutdown // the daemon root should be unmounted. func getUnmountOnShutdownPath(config *config.Config) string { return filepath.Join(config.ExecRoot, "unmount-on-shutdown") } // registerLinks writes the links to a file. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { return nil } for _, l := range hostConfig.Links { name, alias, err := opts.ParseLink(l) if err != nil { return err } child, err := daemon.GetContainer(name) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "could not get container for %s", name) } for child.HostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) child, err = daemon.GetContainer(parts[1]) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "Could not get container for %s", parts[1]) } } if child.HostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } if err := daemon.registerLink(container, child, alias); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig _, err := container.WriteHostConfig() return err } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { return daemon.Unmount(container) } func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry { out := make([]types.BlkioStatEntry, len(entries)) for i, re := range entries { out[i] = types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: re.Op, Value: re.Value, } } return out } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } cs, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } s := &types.StatsJSON{} s.Read = cs.Read stats := cs.Metrics switch t := stats.(type) { case *statsV1.Metrics: return daemon.statsV1(s, t) case *statsV2.Metrics: return daemon.statsV2(s, t) default: return nil, errors.Errorf("unexpected type of metrics %+v", t) } } func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) { if stats.Blkio != nil { s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive), IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive), IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive), IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive), IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive), IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive), IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive), SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive), } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.Usage.Total, PercpuUsage: stats.CPU.Usage.PerCPU, UsageInKernelmode: stats.CPU.Usage.Kernel, UsageInUsermode: stats.CPU.Usage.User, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.Throttling.Periods, ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods, ThrottledTime: stats.CPU.Throttling.ThrottledTime, }, } } if stats.Memory != nil { raw := map[string]uint64{ "cache": stats.Memory.Cache, "rss": stats.Memory.RSS, "rss_huge": stats.Memory.RSSHuge, "mapped_file": stats.Memory.MappedFile, "dirty": stats.Memory.Dirty, "writeback": stats.Memory.Writeback, "pgpgin": stats.Memory.PgPgIn, "pgpgout": stats.Memory.PgPgOut, "pgfault": stats.Memory.PgFault, "pgmajfault": stats.Memory.PgMajFault, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "hierarchical_memory_limit": stats.Memory.HierarchicalMemoryLimit, "hierarchical_memsw_limit": stats.Memory.HierarchicalSwapLimit, "total_cache": stats.Memory.TotalCache, "total_rss": stats.Memory.TotalRSS, "total_rss_huge": stats.Memory.TotalRSSHuge, "total_mapped_file": stats.Memory.TotalMappedFile, "total_dirty": stats.Memory.TotalDirty, "total_writeback": stats.Memory.TotalWriteback, "total_pgpgin": stats.Memory.TotalPgPgIn, "total_pgpgout": stats.Memory.TotalPgPgOut, "total_pgfault": stats.Memory.TotalPgFault, "total_pgmajfault": stats.Memory.TotalPgMajFault, "total_inactive_anon": stats.Memory.TotalInactiveAnon, "total_active_anon": stats.Memory.TotalActiveAnon, "total_inactive_file": stats.Memory.TotalInactiveFile, "total_active_file": stats.Memory.TotalActiveFile, "total_unevictable": stats.Memory.TotalUnevictable, } if 
stats.Memory.Usage != nil { s.MemoryStats = types.MemoryStats{ Stats: raw, Usage: stats.Memory.Usage.Usage, MaxUsage: stats.Memory.Usage.Max, Limit: stats.Memory.Usage.Limit, Failcnt: stats.Memory.Usage.Failcnt, } } else { s.MemoryStats = types.MemoryStats{ Stats: raw, } } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) { if stats.Io != nil { var isbr []types.BlkioStatEntry for _, re := range stats.Io.Usage { isbr = append(isbr, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "read", Value: re.Rbytes, }, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "write", Value: re.Wbytes, }, ) } s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: isbr, // Other fields are unsupported } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.UsageUsec * 1000, // PercpuUsage is not supported UsageInKernelmode: stats.CPU.SystemUsec * 1000, UsageInUsermode: stats.CPU.UserUsec * 1000, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.NrPeriods, ThrottledPeriods: stats.CPU.NrThrottled, ThrottledTime: stats.CPU.ThrottledUsec * 1000, }, } } if stats.Memory != nil { s.MemoryStats = types.MemoryStats{ // Stats is not compatible with v1 Stats: map[string]uint64{ "anon": stats.Memory.Anon, "file": stats.Memory.File, "kernel_stack": stats.Memory.KernelStack, "slab": stats.Memory.Slab, "sock": stats.Memory.Sock, "shmem": stats.Memory.Shmem, "file_mapped": stats.Memory.FileMapped, "file_dirty": stats.Memory.FileDirty, "file_writeback": stats.Memory.FileWriteback, "anon_thp": stats.Memory.AnonThp, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "slab_reclaimable": stats.Memory.SlabReclaimable, "slab_unreclaimable": stats.Memory.SlabUnreclaimable, "pgfault": stats.Memory.Pgfault, "pgmajfault": stats.Memory.Pgmajfault, "workingset_refault": stats.Memory.WorkingsetRefault, "workingset_activate": stats.Memory.WorkingsetActivate, "workingset_nodereclaim": stats.Memory.WorkingsetNodereclaim, "pgrefill": stats.Memory.Pgrefill, "pgscan": stats.Memory.Pgscan, "pgsteal": stats.Memory.Pgsteal, "pgactivate": stats.Memory.Pgactivate, "pgdeactivate": stats.Memory.Pgdeactivate, "pglazyfree": stats.Memory.Pglazyfree, "pglazyfreed": stats.Memory.Pglazyfreed, "thp_fault_alloc": stats.Memory.ThpFaultAlloc, "thp_collapse_alloc": stats.Memory.ThpCollapseAlloc, }, Usage: stats.Memory.Usage, // MaxUsage is not supported Limit: stats.Memory.UsageLimit, } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } if stats.MemoryEvents != nil { // Failcnt is set to the "oom" field of the "memory.events" file. 
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html s.MemoryStats.Failcnt = stats.MemoryEvents.Oom } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } // setDefaultIsolation determines the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { return nil } // setupDaemonProcess sets various settings for the daemon's process func setupDaemonProcess(config *config.Config) error { // setup the daemons oom_score_adj if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil { return err } if err := setMayDetachMounts(); err != nil { logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter") } return nil } // This is used to allow removal of mountpoints that may be mounted in other // namespaces on RHEL based kernels starting from RHEL 7.4. // Without this setting, removals on these RHEL based kernels may fail with // "device or resource busy". // This setting is not available in upstream kernels as it is not configurable, // but has been in the upstream kernels since 3.15. func setMayDetachMounts() error { f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0) if err != nil { if os.IsNotExist(err) { return nil } return errors.Wrap(err, "error opening may_detach_mounts kernel config file") } defer f.Close() _, err = f.WriteString("1") if os.IsPermission(err) { // Setting may_detach_mounts does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") } return nil } return err } func setupOOMScoreAdj(score int) error { if score == 0 { return nil } f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) if err != nil { return err } defer f.Close() stringScore := strconv.Itoa(score) _, err = f.WriteString(stringScore) if os.IsPermission(err) { // Setting oom_score_adj does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) } return nil } return err } func (daemon *Daemon) initCPURtController(mnt, path string) error { if path == "/" || path == "." { return nil } // Recursively create cgroup to ensure that the system and all parent cgroups have values set // for the period and runtime as this limits what the children can be set to. 
if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil { return err } path = filepath.Join(mnt, path) if err := os.MkdirAll(path, 0755); err != nil { return err } if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { return err } return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) } func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error { if configValue == 0 { return nil } return os.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700) } func (daemon *Daemon) setupSeccompProfile() error { switch profile := daemon.configStore.SeccompProfile; profile { case "", config.SeccompProfileDefault: daemon.seccompProfilePath = config.SeccompProfileDefault case config.SeccompProfileUnconfined: daemon.seccompProfilePath = config.SeccompProfileUnconfined default: daemon.seccompProfilePath = profile b, err := os.ReadFile(profile) if err != nil { return fmt.Errorf("opening seccomp profile (%s) failed: %v", profile, err) } daemon.seccompProfile = b } return nil } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { var siOpts []sysinfo.Opt if daemon.getCgroupDriver() == cgroupSystemdDriver { if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" { siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice")) } } return sysinfo.New(siOpts...) } func recursiveUnmount(target string) error { return mount.RecursiveUnmount(target) } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) return err }
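To make the selection logic of `setHostGatewayIP` in the updated daemon_unix.go above easier to follow in isolation, here is a minimal, self-contained sketch of the same precedence rules: an explicitly configured value is never overridden, otherwise the bridge's first IPv4 gateway is used, then its first IPv6 gateway. The `bridgeGateways` type and `resolveHostGatewayIP` function are hypothetical stand-ins for the `(v4Info, v6Info)` pair returned by `n.Info().IpamInfo()`; they are not part of the daemon code.

```go
package main

import (
	"fmt"
	"net"
)

// bridgeGateways is a hypothetical stand-in for the IPAM info reported
// for the default bridge network (one gateway per address pool).
type bridgeGateways struct {
	v4 []net.IP // gateways of the IPv4 pools
	v6 []net.IP // gateways of the IPv6 pools
}

// resolveHostGatewayIP mirrors the precedence used by setHostGatewayIP:
// a configured host-gateway-ip always wins; otherwise the first IPv4
// gateway is used, then the first IPv6 gateway, then nothing.
func resolveHostGatewayIP(configured net.IP, gw bridgeGateways) net.IP {
	if configured != nil {
		return configured
	}
	if len(gw.v4) > 0 {
		return gw.v4[0]
	}
	if len(gw.v6) > 0 {
		return gw.v6[0]
	}
	return nil // no default bridge gateway available
}

func main() {
	gw := bridgeGateways{v4: []net.IP{net.ParseIP("172.17.0.1")}}
	fmt.Println(resolveHostGatewayIP(nil, gw))                     // 172.17.0.1
	fmt.Println(resolveHostGatewayIP(net.ParseIP("10.0.0.1"), gw)) // 10.0.0.1
}
```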
sanchayanghosh
58e68789bf37b1484e773a3db99146ae36942295
40ccedd61b9d8b64fc75d64cd57b9ee35c769632
Perhaps instead of passing the whole daemon, we could make it get just the config-store. Looks like the nil-check for `controller` is also redundant (because at this point, we should always have a controller?) Perhaps also change the `if` to use an early return, so that we don't need the nested ifs;

```go
// setHostGatewayIP sets cfg.HostGatewayIP to the default bridge's IP if it is empty
func setHostGatewayIP(config *config.Config, controller libnetwork.NetworkController) {
	if config.HostGatewayIP != nil {
		return
	}
	if n, err := controller.NetworkByName("bridge"); err == nil {
		...
```
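For readers less familiar with the guard-clause style the comment asks for, the sketch below contrasts the nested-if shape of the original code with the early-return shape. It is a generic illustration only; the `settings` type and function names are made up and are not moby types.

```go
package main

import "fmt"

type settings struct{ hostGatewayIP string }

// nested buries the interesting assignment two conditionals deep,
// which is the shape of the original initNetworkController code.
func nested(cfg *settings, gw string, haveBridge bool) {
	if cfg.hostGatewayIP == "" {
		if haveBridge {
			cfg.hostGatewayIP = gw
		}
	}
}

// guarded exits early on each precondition, so the assignment is
// not nested; this is the style the reviewer suggests.
func guarded(cfg *settings, gw string, haveBridge bool) {
	if cfg.hostGatewayIP != "" {
		return // a user-configured value always wins
	}
	if !haveBridge {
		return // nothing to fall back to
	}
	cfg.hostGatewayIP = gw
}

func main() {
	a, b := &settings{}, &settings{}
	nested(a, "172.17.0.1", true)
	guarded(b, "172.17.0.1", true)
	fmt.Println(a.hostGatewayIP, b.hostGatewayIP) // same result either way
}
```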
thaJeztah
4,483
moby/moby
42,785
Fixed docker.internal.gateway not displaying properly on live restore
fixes https://github.com/moby/moby/issues/42753

<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: -->

**- What I did**
I fixed issue #42753, where the docker.internal host-gateway address was not assigned to containers after a reboot when live-restore was enabled. The problem affected containers started after the docker daemon was restarted.

**- How I did it**
There was a function that assigned the host gateway IP address. I made sure that it was also called inside the conditional that runs when there are active sandboxes.

**- How to verify it**
#42753 details the steps.

**- Description for the changelog**
<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->
This pull request removes the recurrence of #42753 by moving the HostGatewayIP assignment into a function and calling it once in the normal case and once when sandboxes (containers) are already running during a daemon restart.

The pull request is a WIP since I have yet to write a unit test. I would need your guidance for it. I am not sure how to call the API for the test, i.e. how to run `docker run --add-host=.... busybox` in script form and get the output. I know how to do it with fork/subprocess, but am not sure of the API approach to it.

**- A picture of a cute animal (not mandatory but encouraged)**
![Not mine, but posting it here.](https://ichef.bbci.co.uk/news/976/cpsprodpb/3B4B/production/_109897151_otternew.jpg)
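Since the description asks how to drive `docker run --add-host=... busybox` from the API for a test, here is a rough sketch of one way to do it with the Go client SDK (`github.com/docker/docker/client`). It is only an illustration under assumptions: the `busybox` image is assumed to be present locally, the host entry and error handling are placeholders, and moby's own integration tests use their own test helpers rather than this exact code.

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	ctx := context.Background()

	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Roughly: docker run --add-host=host.docker.internal:host-gateway busybox cat /etc/hosts
	resp, err := cli.ContainerCreate(ctx,
		&container.Config{
			Image: "busybox", // assumed to be pulled already
			Cmd:   []string{"cat", "/etc/hosts"},
		},
		&container.HostConfig{
			ExtraHosts: []string{"host.docker.internal:host-gateway"},
		},
		nil, nil, "")
	if err != nil {
		panic(err)
	}

	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
		panic(err)
	}

	// Wait for the container to exit before reading its output.
	statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
	select {
	case err := <-errCh:
		if err != nil {
			panic(err)
		}
	case <-statusCh:
	}

	// /etc/hosts should contain the host-gateway entry if the daemon
	// resolved a HostGatewayIP.
	out, err := cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true})
	if err != nil {
		panic(err)
	}
	defer out.Close()
	stdcopy.StdCopy(os.Stdout, os.Stderr, out) // demultiplex the stdout/stderr stream
}
```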
null
2021-08-25 21:06:53+00:00
2021-11-16 04:26:21+00:00
daemon/daemon_unix.go
//go:build linux || freebsd // +build linux freebsd package daemon // import "github.com/docker/docker/daemon" import ( "bufio" "context" "fmt" "net" "os" "path/filepath" "runtime" "runtime/debug" "strconv" "strings" "sync" "time" "github.com/containerd/cgroups" statsV1 "github.com/containerd/cgroups/stats/v1" statsV2 "github.com/containerd/cgroups/v2/stats" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/blkiodev" pblkiodev "github.com/docker/docker/api/types/blkiodev" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/drivers/bridge" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/options" lntypes "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" volumemounts "github.com/docker/docker/volume/mounts" "github.com/moby/sys/mount" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) const ( isWindows = false // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 linuxMinCPUShares = 2 linuxMaxCPUShares = 262144 platformSupported = true // It's not kernel limit, we want this 6M limit to account for overhead during startup, and to supply a reasonable functional container linuxMinMemory = 6291456 // constants for remapped root settings defaultIDSpecifier = "default" defaultRemappedID = "dockremap" // constant for cgroup drivers cgroupFsDriver = "cgroupfs" cgroupSystemdDriver = "systemd" cgroupNoneDriver = "none" ) type containerGetter interface { GetContainer(string) (*container.Container, error) } func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { memory := specs.LinuxMemory{} if config.Memory > 0 { memory.Limit = &config.Memory } if config.MemoryReservation > 0 { memory.Reservation = &config.MemoryReservation } if config.MemorySwap > 0 { memory.Swap = &config.MemorySwap } if config.MemorySwappiness != nil { swappiness := uint64(*config.MemorySwappiness) memory.Swappiness = &swappiness } if config.OomKillDisable != nil { memory.DisableOOMKiller = config.OomKillDisable } if config.KernelMemory != 0 { memory.Kernel = &config.KernelMemory } if config.KernelMemoryTCP != 0 { memory.KernelTCP = &config.KernelMemoryTCP } return &memory } func getPidsLimit(config containertypes.Resources) *specs.LinuxPids { if config.PidsLimit == nil { return nil } if *config.PidsLimit <= 0 { // docker API allows 0 and negative values to unset this to be consistent // with default values. When updating values, runc requires -1 to unset // the previous limit. 
return &specs.LinuxPids{Limit: -1} } return &specs.LinuxPids{Limit: *config.PidsLimit} } func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { cpu := specs.LinuxCPU{} if config.CPUShares < 0 { return nil, fmt.Errorf("shares: invalid argument") } if config.CPUShares >= 0 { shares := uint64(config.CPUShares) cpu.Shares = &shares } if config.CpusetCpus != "" { cpu.Cpus = config.CpusetCpus } if config.CpusetMems != "" { cpu.Mems = config.CpusetMems } if config.NanoCPUs > 0 { // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt period := uint64(100 * time.Millisecond / time.Microsecond) quota := config.NanoCPUs * int64(period) / 1e9 cpu.Period = &period cpu.Quota = &quota } if config.CPUPeriod != 0 { period := uint64(config.CPUPeriod) cpu.Period = &period } if config.CPUQuota != 0 { q := config.CPUQuota cpu.Quota = &q } if config.CPURealtimePeriod != 0 { period := uint64(config.CPURealtimePeriod) cpu.RealtimePeriod = &period } if config.CPURealtimeRuntime != 0 { c := config.CPURealtimeRuntime cpu.RealtimeRuntime = &c } return &cpu, nil } func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { var stat unix.Stat_t var blkioWeightDevices []specs.LinuxWeightDevice for _, weightDevice := range config.BlkioWeightDevice { if err := unix.Stat(weightDevice.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: weightDevice.Path, Err: err}) } weight := weightDevice.Weight d := specs.LinuxWeightDevice{Weight: &weight} // The type is 32bit on mips. d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert blkioWeightDevices = append(blkioWeightDevices, d) } return blkioWeightDevices, nil } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { container.NoNewPrivileges = daemon.configStore.NoNewPrivileges return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { var ( labelOpts []string err error ) for _, opt := range config.SecurityOpt { if opt == "no-new-privileges" { container.NoNewPrivileges = true continue } if opt == "disable" { labelOpts = append(labelOpts, "disable") continue } var con []string if strings.Contains(opt, "=") { con = strings.SplitN(opt, "=", 2) } else if strings.Contains(opt, ":") { con = strings.SplitN(opt, ":", 2) logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") } if len(con) != 2 { return fmt.Errorf("invalid --security-opt 1: %q", opt) } switch con[0] { case "label": labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] case "seccomp": container.SeccompProfile = con[1] case "no-new-privileges": noNewPrivileges, err := strconv.ParseBool(con[1]) if err != nil { return fmt.Errorf("invalid --security-opt 2: %q", opt) } container.NoNewPrivileges = noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } } container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { var throttleDevices []specs.LinuxThrottleDevice var stat unix.Stat_t for _, d := range devs { if err := unix.Stat(d.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: d.Path, 
Err: err}) } d := specs.LinuxThrottleDevice{Rate: d.Rate} // the type is 32bit on mips d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert throttleDevices = append(throttleDevices, d) } return throttleDevices, nil } // adjustParallelLimit takes a number of objects and a proposed limit and // figures out if it's reasonable (and adjusts it accordingly). This is only // used for daemon startup, which does a lot of parallel loading of containers // (and if we exceed RLIMIT_NOFILE then we're in trouble). func adjustParallelLimit(n int, limit int) int { // Rule-of-thumb overhead factor (how many files will each goroutine open // simultaneously). Yes, this is ugly but to be frank this whole thing is // ugly. const overhead = 2 // On Linux, we need to ensure that parallelStartupJobs doesn't cause us to // exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it // and give a warning (since in theory the user should increase their // ulimits to the largest possible value for dockerd). var rlim unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err) return limit } softRlimit := int(rlim.Cur) // Much fewer containers than RLIMIT_NOFILE. No need to adjust anything. if softRlimit > overhead*n { return limit } // RLIMIT_NOFILE big enough, no need to adjust anything. if softRlimit > overhead*limit { return limit } logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit) return softRlimit / overhead } func checkKernel() error { // Check for unsupported kernel versions // FIXME: it would be cleaner to not test for specific versions, but rather // test for specific functionalities. // Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.10 crashes are clearer. // For details see https://github.com/docker/docker/issues/407 // Docker 1.11 and above doesn't actually run on kernels older than 3.4, // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). if !kernel.CheckKernelVersion(3, 10, 0) { v, _ := kernel.GetKernelVersion() if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) } } return nil } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if adjustCPUShares && hostConfig.CPUShares > 0 { // Handle unsupported CPUShares if hostConfig.CPUShares < linuxMinCPUShares { logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) hostConfig.CPUShares = linuxMinCPUShares } else if hostConfig.CPUShares > linuxMaxCPUShares { logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) hostConfig.CPUShares = linuxMaxCPUShares } } if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { // By default, MemorySwap is set to twice the size of Memory. 
hostConfig.MemorySwap = hostConfig.Memory * 2 } if hostConfig.ShmSize == 0 { hostConfig.ShmSize = config.DefaultShmSize if daemon.configStore != nil { hostConfig.ShmSize = int64(daemon.configStore.ShmSize) } } // Set default IPC mode, if unset for container if hostConfig.IpcMode.IsEmpty() { m := config.DefaultIpcMode if daemon.configStore != nil { m = containertypes.IpcMode(daemon.configStore.IpcMode) } hostConfig.IpcMode = m } // Set default cgroup namespace mode, if unset for container if hostConfig.CgroupnsMode.IsEmpty() { // for cgroup v2: unshare cgroupns even for privileged containers // https://github.com/containers/libpod/pull/4374#issuecomment-549776387 if hostConfig.Privileged && cgroups.Mode() != cgroups.Unified { hostConfig.CgroupnsMode = containertypes.CgroupnsModeHost } else { m := containertypes.CgroupnsModeHost if cgroups.Mode() == cgroups.Unified { m = containertypes.CgroupnsModePrivate } if daemon.configStore != nil { m = containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode) } hostConfig.CgroupnsMode = m } } adaptSharedNamespaceContainer(daemon, hostConfig) var err error secOpts, err := daemon.generateSecurityOpt(hostConfig) if err != nil { return err } hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...) if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable } return nil } // adaptSharedNamespaceContainer replaces container name with its ID in hostConfig. // To be more precisely, it modifies `container:name` to `container:ID` of PidMode, IpcMode // and NetworkMode. // // When a container shares its namespace with another container, use ID can keep the namespace // sharing connection between the two containers even the another container is renamed. func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) { containerPrefix := "container:" if hostConfig.PidMode.IsContainer() { pidContainer := hostConfig.PidMode.Container() // if there is any error returned here, we just ignore it and leave it to be // handled in the following logic if c, err := daemon.GetContainer(pidContainer); err == nil { hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID) } } if hostConfig.IpcMode.IsContainer() { ipcContainer := hostConfig.IpcMode.Container() if c, err := daemon.GetContainer(ipcContainer); err == nil { hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID) } } if hostConfig.NetworkMode.IsContainer() { netContainer := hostConfig.NetworkMode.ConnectedContainer() if c, err := daemon.GetContainer(netContainer); err == nil { hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID) } } } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) { fixMemorySwappiness(resources) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory limit allowed is 6MB") } if resources.Memory > 0 && !sysInfo.MemoryLimit { warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") resources.Memory = 0 resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") } if resources.Memory == 0 && resources.MemorySwap > 0 && !update { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") } if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness if swappiness < 0 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) } } if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.MemoryReservation = 0 } if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory reservation allowed is 6MB") } if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") } if resources.KernelMemory > 0 { // Kernel memory limit is not supported on cgroup v2. // Even on cgroup v1, kernel memory limit (`kmem.limit_in_bytes`) has been deprecated since kernel 5.4. // https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7 warnings = append(warnings, "Specifying a kernel memory limit is deprecated and will be removed in a future release.") } if resources.KernelMemory > 0 && !sysInfo.KernelMemory { warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.KernelMemory = 0 } if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") } if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") } if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point // warning the caller if they already wanted the feature to be off if *resources.OomKillDisable { warnings = append(warnings, "Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") } resources.OomKillDisable = nil } if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 { warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.") } if resources.PidsLimit != nil && !sysInfo.PidsLimit { if *resources.PidsLimit > 0 { warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.") } resources.PidsLimit = nil } // cpu subsystem checks and adjustments if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") } if resources.NanoCPUs > 0 && !sysInfo.CPUCfs { return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU CFS scheduler or the cgroup is not mounted") } // The highest precision we could get on Linux is 0.001, by setting // cpu.cfs_period_us=1000ms // cpu.cfs_quota=1ms // See the following link for details: // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. // The error message is 0.01 so that this is consistent with Windows if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.CPUShares > 0 && !sysInfo.CPUShares { warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") resources.CPUShares = 0 } if (resources.CPUPeriod != 0 || resources.CPUQuota != 0) && !sysInfo.CPUCfs { warnings = append(warnings, "Your kernel does not support CPU CFS scheduler. CPU period/quota discarded.") resources.CPUPeriod = 0 resources.CPUQuota = 0 } if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") } if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") } if resources.CPUPercent > 0 { warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) resources.CPUPercent = 0 } // cpuset subsystem checks and adjustments if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. 
Cpuset discarded.") resources.CpusetCpus = "" resources.CpusetMems = "" } cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus) } if !cpusAvailable { return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) } memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems) } if !memsAvailable { return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) } // blkio subsystem checks and adjustments if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") resources.BlkioWeight = 0 } if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") } if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) } if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} } if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} } return warnings, nil } func (daemon *Daemon) getCgroupDriver() string { if UsingSystemd(daemon.configStore) { return cgroupSystemdDriver } if daemon.Rootless() { return cgroupNoneDriver } return cgroupFsDriver } // getCD gets the raw value of the native.cgroupdriver option, if set. 
func getCD(config *config.Config) string { for _, option := range config.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { continue } return val } return "" } // verifyCgroupDriver validates native.cgroupdriver func verifyCgroupDriver(config *config.Config) error { cd := getCD(config) if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { return nil } if cd == cgroupNoneDriver { return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd) } return fmt.Errorf("native.cgroupdriver option %s not supported", cd) } // UsingSystemd returns true if cli option includes native.cgroupdriver=systemd func UsingSystemd(config *config.Config) bool { cd := getCD(config) if cd == cgroupSystemdDriver { return true } // On cgroup v2 hosts, default to systemd driver if cd == "" && cgroups.Mode() == cgroups.Unified && isRunningSystemd() { return true } return false } var ( runningSystemd bool detectSystemd sync.Once ) // isRunningSystemd checks whether the host was booted with systemd as its init // system. This functions similarly to systemd's `sd_booted(3)`: internally, it // checks whether /run/systemd/system/ exists and is a directory. // http://www.freedesktop.org/software/systemd/man/sd_booted.html // // NOTE: This function comes from package github.com/coreos/go-systemd/util // It was borrowed here to avoid a dependency on cgo. func isRunningSystemd() bool { detectSystemd.Do(func() { fi, err := os.Lstat("/run/systemd/system") if err != nil { return } runningSystemd = fi.IsDir() }) return runningSystemd } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } sysInfo := daemon.RawSysInfo() w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update) // no matter err is nil or not, w could have data in itself. warnings = append(warnings, w...) if err != nil { return warnings, err } if hostConfig.ShmSize < 0 { return warnings, fmt.Errorf("SHM size can not be less than 0") } if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) } // ip-forwarding does not affect container with '--net=host' (or '--net=none') if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") } if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 { warnings = append(warnings, "Published ports are discarded when using host network mode") } // check for various conflicting options with user namespaces if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { if hostConfig.Privileged { return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. 
You must run the container in the host namespace when running privileged mode") } if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled") } if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled") } } if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { // CgroupParent for systemd cgroup should be named as "xxx.slice" if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if hostConfig.Runtime == "" { hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() } if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) } parser := volumemounts.NewParser() for dest := range hostConfig.Tmpfs { if err := parser.ValidateTmpfsMountDestination(dest); err != nil { return warnings, err } } if !hostConfig.CgroupnsMode.Valid() { return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode) } if hostConfig.CgroupnsMode.IsPrivate() { if !sysInfo.CgroupNamespaces { warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.") } } if hostConfig.Runtime == config.LinuxV1RuntimeName || (hostConfig.Runtime == "" && daemon.configStore.DefaultRuntime == config.LinuxV1RuntimeName) { warnings = append(warnings, fmt.Sprintf("Configured runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName)) } return warnings, nil } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(conf *config.Config) error { if conf.ContainerdNamespace == conf.ContainerdPluginNamespace { return errors.New("containers namespace and plugins namespace cannot be the same") } // Check for mutually incompatible config options if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") } if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") } if conf.BridgeConfig.EnableIP6Tables && !conf.Experimental { return fmt.Errorf("ip6tables rules are only available if experimental features are enabled") } if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { conf.BridgeConfig.EnableIPMasq = false } if err := verifyCgroupDriver(conf); err != nil { return err } if conf.CgroupParent != "" && UsingSystemd(conf) { if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified { return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode") } configureRuntimes(conf) if rtName := conf.GetDefaultRuntimeName(); rtName != "" { if conf.GetRuntime(rtName) == nil { return fmt.Errorf("specified default runtime '%s' does not exist", rtName) } if rtName == config.LinuxV1RuntimeName { logrus.Warnf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName) } } return nil } // checkSystem validates platform-specific requirements func checkSystem() error { return checkKernel() } // configureMaxThreads sets the Go runtime max threads threshold // which is 90% of the kernel setting from /proc/sys/kernel/threads-max func configureMaxThreads(config *config.Config) error { mt, err := os.ReadFile("/proc/sys/kernel/threads-max") if err != nil { return err } mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) if err != nil { return err } maxThreads := (mtint / 100) * 90 debug.SetMaxThreads(maxThreads) logrus.Debugf("Golang's threads limit set to %d", maxThreads) return nil } func overlaySupportsSelinux() (bool, error) { f, err := os.Open("/proc/kallsyms") if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), " security_inode_copy_up") { return true, nil } } return false, s.Err() } // configureKernelSecuritySupport configures and validates security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { if config.EnableSelinuxSupport { if !selinux.GetEnabled() { logrus.Warn("Docker could not enable SELinux on the host system") return nil } if driverName == "overlay" || driverName == "overlay2" { // If driver is overlay or overlay2, make sure kernel // supports selinux with overlay. supported, err := overlaySupportsSelinux() if err != nil { return err } if !supported { logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) } } } else { selinux.SetDisabled() } return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } if len(activeSandboxes) > 0 { logrus.Info("There are old running containers, the network config will not take affect") return controller, nil } // Initialize default network on "null" if n, _ := controller.NetworkByName("none"); n == nil { if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) } } // Initialize default network on "host" if n, _ := controller.NetworkByName("host"); n == nil { if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) } } // Clear stale bridge network if n, err := controller.NetworkByName("bridge"); err == nil { if err = n.Delete(); err != nil { return nil, fmt.Errorf("could not delete the default bridge network: %v", err) } if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled { removeDefaultBridgeInterface() } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } else { removeDefaultBridgeInterface() } // Set HostGatewayIP to the default bridge's IP if it is empty if daemon.configStore.HostGatewayIP == nil && controller != nil { if n, err := controller.NetworkByName("bridge"); err == nil { v4Info, v6Info := n.Info().IpamInfo() var gateway net.IP if len(v4Info) > 0 { gateway = v4Info[0].Gateway.IP } else if len(v6Info) > 0 { gateway = v6Info[0].Gateway.IP } daemon.configStore.HostGatewayIP = gateway } } return controller, nil } func driverOptions(config *config.Config) nwconfig.Option { return nwconfig.OptionDriverConfig("bridge", options.Generic{ netlabel.GenericData: options.Generic{ "EnableIPForwarding": config.BridgeConfig.EnableIPForward, "EnableIPTables": config.BridgeConfig.EnableIPTables, "EnableIP6Tables": config.BridgeConfig.EnableIP6Tables, "EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy, "UserlandProxyPath": config.BridgeConfig.UserlandProxyPath, }, }) } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { bridgeName := bridge.DefaultBridgeName if config.BridgeConfig.Iface != "" { bridgeName = config.BridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq), bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication), } // --ip processing if config.BridgeConfig.DefaultIP != nil { netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String() } ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) if err != nil { return errors.Wrap(err, "list bridge addresses failed") } nw := nwList[0] if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return errors.Wrap(err, "parse CIDR failed") } // Iterate through in case there are multiple addresses for the bridge for _, entry := range nwList { if fCIDR.Contains(entry.IP) { nw = entry break } } } ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() hip, _ := 
lntypes.GetHostPartIP(nw.IP, nw.Mask) if hip.IsGlobalUnicast() { ipamV4Conf.Gateway = nw.IP.String() } if config.BridgeConfig.IP != "" { ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP) if err != nil { return err } ipamV4Conf.PreferredPool = ipNet.String() ipamV4Conf.Gateway = ip.String() } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return err } ipamV4Conf.SubPool = fCIDR.String() } if config.BridgeConfig.DefaultGatewayIPv4 != nil { ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() } var ( deferIPv6Alloc bool ipamV6Conf *libnetwork.IpamConf ) if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" { return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6")) } else if config.BridgeConfig.FixedCIDRv6 != "" { _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) if err != nil { return err } // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has // at least 48 host bits, we need to guarantee the current behavior where the containers' // IPv6 addresses will be constructed based on the containers' interface MAC address. // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints // on this network until after the driver has created the endpoint and returned the // constructed address. Libnetwork will then reserve this address with the ipam driver. 
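// Put differently (illustrative note): a prefix of length 80 or less leaves at least 128-80 = 48 host bits,
// which is why the deferred-allocation flag below is set when `ones <= 80`.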
ones, _ := fCIDRv6.Mask.Size() deferIPv6Alloc = ones <= 80 ipamV6Conf = &libnetwork.IpamConf{ AuxAddresses: make(map[string]string), PreferredPool: fCIDRv6.String(), } // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 // address belongs to the same network, we need to inform libnetwork about it, so // that it can be reserved with IPAM and it will not be given away to somebody else for _, nw6 := range nw6List { if fCIDRv6.Contains(nw6.IP) { ipamV6Conf.Gateway = nw6.IP.String() break } } } if config.BridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} if ipamV6Conf != nil { v6Conf = append(v6Conf, ipamV6Conf) } // Initialize default network on "bridge" with the same name _, err = controller.NewNetwork("bridge", "bridge", "", libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), libnetwork.NetworkOptionDriverOpts(netOption), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) if err != nil { return fmt.Errorf("Error creating default \"bridge\" network: %v", err) } return nil } // Remove default bridge interface if present (--bridge=none use case) func removeDefaultBridgeInterface() { if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { if err := netlink.LinkDel(lnk); err != nil { logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) } } } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return func(initPath containerfs.ContainerFS) error { return initlayer.Setup(initPath, idMapping.RootPair()) } } // Parse the remapped root (user namespace) option, which can be one of: // username - valid username from /etc/passwd // username:groupname - valid username; valid groupname from /etc/group // uid - 32-bit unsigned int valid Linux UID value // uid:gid - uid value; 32-bit unsigned int Linux GID value // // If no groupname is specified, and a username is specified, an attempt // will be made to lookup a gid for that username as a groupname // // If names are used, they are verified to exist in passwd/group func parseRemappedRoot(usergrp string) (string, string, error) { var ( userID, groupID int username, groupname string ) idparts := strings.Split(usergrp, ":") if len(idparts) > 2 { return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) } if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { // must be a uid; take it as valid userID = int(uid) luser, err := idtools.LookupUID(userID) if err != nil { return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) } username = luser.Name if len(idparts) == 1 { // if the uid was numeric and no gid was specified, take the uid as the gid groupID = userID lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) } groupname = lgrp.Name } } else { lookupName := idparts[0] // special case: if the user specified "default", they want Docker to create or // use (after creation) the "dockremap" user/group for root remapping if lookupName == defaultIDSpecifier { lookupName = defaultRemappedID } luser, err := idtools.LookupUser(lookupName) if err != 
nil && idparts[0] != defaultIDSpecifier { // error if the name requested isn't the special "dockremap" ID return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) } else if err != nil { // special case-- if the username == "default", then we have been asked // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} // ranges will be used for the user and group mappings in user namespaced containers _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) if err == nil { return defaultRemappedID, defaultRemappedID, nil } return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) } username = luser.Name if len(idparts) == 1 { // we only have a string username, and no group specified; look up gid from username as group group, err := idtools.LookupGroup(lookupName) if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) } groupname = group.Name } } if len(idparts) == 2 { // groupname or gid is separately specified and must be resolved // to an unsigned 32-bit gid if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { // must be a gid, take it as valid groupID = int(gid) lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) } groupname = lgrp.Name } else { // not a number; attempt a lookup if _, err := idtools.LookupGroup(idparts[1]); err != nil { return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) } groupname = idparts[1] } } return username, groupname, nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { return nil, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { return nil, err } if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") return &idtools.IdentityMapping{}, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) mappings, err := idtools.NewIdentityMapping(username) if err != nil { return nil, errors.Wrap(err, "Can't create ID mappings") } return mappings, nil } return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools.Identity) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root // (e.g. mounted layers of a container) can traverse this path. // The user namespace support will create subdirectories for the remapped root host uid:gid // pair owned by that same uid:gid pair for proper write access to those needed metadata and // layer content subtrees. 
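// For illustration (hypothetical values): with a remapped root of uid:gid 100000:100000, the
// user-namespaced daemon root created further below becomes <rootDir>/100000.100000.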
if _, err := os.Stat(rootDir); err == nil { // root currently exists; verify the access bits are correct by setting them if err = os.Chmod(rootDir, 0711); err != nil { return err } } else if os.IsNotExist(err) { // no root exists yet, create it 0711 with root:root ownership if err := os.MkdirAll(rootDir, 0711); err != nil { return err } } id := idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: remappedRoot.GID} // First make sure the current root dir has the correct perms. if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root) } // if user namespaces are enabled we will create a subtree underneath the specified root // with any/all specified remapped root uid/gid options on the daemon creating // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } // we also need to verify that any pre-existing directories in the path to // the graphroot won't block access to remapped root--if any pre-existing directory // has strict permissions that don't allow "x", container start will fail, so // better to warn and fail now dirPath := config.Root for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if !idtools.CanAccess(dirPath, remappedRoot) { return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root) } } } if err := setupDaemonRootPropagation(config); err != nil { logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") } return nil } func setupDaemonRootPropagation(cfg *config.Config) error { rootParentMount, mountOptions, err := getSourceMount(cfg.Root) if err != nil { return errors.Wrap(err, "error getting daemon root's parent mount") } var cleanupOldFile bool cleanupFile := getUnmountOnShutdownPath(cfg) defer func() { if !cleanupOldFile { return } if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) { logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") } }() if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) { cleanupOldFile = true return nil } if err := mount.MakeShared(cfg.Root); err != nil { return errors.Wrap(err, "could not setup daemon root propagation to shared") } // check the case where this may have already been a mount to itself. // If so then the daemon only performed a remount and should not try to unmount this later.
if rootParentMount == cfg.Root { cleanupOldFile = true return nil } if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil { return errors.Wrap(err, "error creating dir to store mount cleanup file") } if err := os.WriteFile(cleanupFile, nil, 0600); err != nil { return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown") } return nil } // getUnmountOnShutdownPath generates the path to used when writing the file that signals to the daemon that on shutdown // the daemon root should be unmounted. func getUnmountOnShutdownPath(config *config.Config) string { return filepath.Join(config.ExecRoot, "unmount-on-shutdown") } // registerLinks writes the links to a file. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { return nil } for _, l := range hostConfig.Links { name, alias, err := opts.ParseLink(l) if err != nil { return err } child, err := daemon.GetContainer(name) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "could not get container for %s", name) } for child.HostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) child, err = daemon.GetContainer(parts[1]) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "Could not get container for %s", parts[1]) } } if child.HostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } if err := daemon.registerLink(container, child, alias); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig _, err := container.WriteHostConfig() return err } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { return daemon.Unmount(container) } func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry { out := make([]types.BlkioStatEntry, len(entries)) for i, re := range entries { out[i] = types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: re.Op, Value: re.Value, } } return out } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } cs, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } s := &types.StatsJSON{} s.Read = cs.Read stats := cs.Metrics switch t := stats.(type) { case *statsV1.Metrics: return daemon.statsV1(s, t) case *statsV2.Metrics: return daemon.statsV2(s, t) default: return nil, errors.Errorf("unexpected type of metrics %+v", t) } } func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) { if stats.Blkio != nil { s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive), IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive), IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive), IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive), IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive), IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive), IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive), SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive), } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.Usage.Total, PercpuUsage: stats.CPU.Usage.PerCPU, UsageInKernelmode: stats.CPU.Usage.Kernel, UsageInUsermode: stats.CPU.Usage.User, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.Throttling.Periods, ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods, ThrottledTime: stats.CPU.Throttling.ThrottledTime, }, } } if stats.Memory != nil { raw := map[string]uint64{ "cache": stats.Memory.Cache, "rss": stats.Memory.RSS, "rss_huge": stats.Memory.RSSHuge, "mapped_file": stats.Memory.MappedFile, "dirty": stats.Memory.Dirty, "writeback": stats.Memory.Writeback, "pgpgin": stats.Memory.PgPgIn, "pgpgout": stats.Memory.PgPgOut, "pgfault": stats.Memory.PgFault, "pgmajfault": stats.Memory.PgMajFault, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "hierarchical_memory_limit": stats.Memory.HierarchicalMemoryLimit, "hierarchical_memsw_limit": stats.Memory.HierarchicalSwapLimit, "total_cache": stats.Memory.TotalCache, "total_rss": stats.Memory.TotalRSS, "total_rss_huge": stats.Memory.TotalRSSHuge, "total_mapped_file": stats.Memory.TotalMappedFile, "total_dirty": stats.Memory.TotalDirty, "total_writeback": stats.Memory.TotalWriteback, "total_pgpgin": stats.Memory.TotalPgPgIn, "total_pgpgout": stats.Memory.TotalPgPgOut, "total_pgfault": stats.Memory.TotalPgFault, "total_pgmajfault": stats.Memory.TotalPgMajFault, "total_inactive_anon": stats.Memory.TotalInactiveAnon, "total_active_anon": stats.Memory.TotalActiveAnon, "total_inactive_file": stats.Memory.TotalInactiveFile, "total_active_file": stats.Memory.TotalActiveFile, "total_unevictable": stats.Memory.TotalUnevictable, } if 
stats.Memory.Usage != nil { s.MemoryStats = types.MemoryStats{ Stats: raw, Usage: stats.Memory.Usage.Usage, MaxUsage: stats.Memory.Usage.Max, Limit: stats.Memory.Usage.Limit, Failcnt: stats.Memory.Usage.Failcnt, } } else { s.MemoryStats = types.MemoryStats{ Stats: raw, } } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) { if stats.Io != nil { var isbr []types.BlkioStatEntry for _, re := range stats.Io.Usage { isbr = append(isbr, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "read", Value: re.Rbytes, }, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "write", Value: re.Wbytes, }, ) } s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: isbr, // Other fields are unsupported } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.UsageUsec * 1000, // PercpuUsage is not supported UsageInKernelmode: stats.CPU.SystemUsec * 1000, UsageInUsermode: stats.CPU.UserUsec * 1000, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.NrPeriods, ThrottledPeriods: stats.CPU.NrThrottled, ThrottledTime: stats.CPU.ThrottledUsec * 1000, }, } } if stats.Memory != nil { s.MemoryStats = types.MemoryStats{ // Stats is not compatible with v1 Stats: map[string]uint64{ "anon": stats.Memory.Anon, "file": stats.Memory.File, "kernel_stack": stats.Memory.KernelStack, "slab": stats.Memory.Slab, "sock": stats.Memory.Sock, "shmem": stats.Memory.Shmem, "file_mapped": stats.Memory.FileMapped, "file_dirty": stats.Memory.FileDirty, "file_writeback": stats.Memory.FileWriteback, "anon_thp": stats.Memory.AnonThp, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "slab_reclaimable": stats.Memory.SlabReclaimable, "slab_unreclaimable": stats.Memory.SlabUnreclaimable, "pgfault": stats.Memory.Pgfault, "pgmajfault": stats.Memory.Pgmajfault, "workingset_refault": stats.Memory.WorkingsetRefault, "workingset_activate": stats.Memory.WorkingsetActivate, "workingset_nodereclaim": stats.Memory.WorkingsetNodereclaim, "pgrefill": stats.Memory.Pgrefill, "pgscan": stats.Memory.Pgscan, "pgsteal": stats.Memory.Pgsteal, "pgactivate": stats.Memory.Pgactivate, "pgdeactivate": stats.Memory.Pgdeactivate, "pglazyfree": stats.Memory.Pglazyfree, "pglazyfreed": stats.Memory.Pglazyfreed, "thp_fault_alloc": stats.Memory.ThpFaultAlloc, "thp_collapse_alloc": stats.Memory.ThpCollapseAlloc, }, Usage: stats.Memory.Usage, // MaxUsage is not supported Limit: stats.Memory.UsageLimit, } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } if stats.MemoryEvents != nil { // Failcnt is set to the "oom" field of the "memory.events" file. 
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html s.MemoryStats.Failcnt = stats.MemoryEvents.Oom } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } // setDefaultIsolation determines the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { return nil } // setupDaemonProcess sets various settings for the daemon's process func setupDaemonProcess(config *config.Config) error { // setup the daemons oom_score_adj if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil { return err } if err := setMayDetachMounts(); err != nil { logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter") } return nil } // This is used to allow removal of mountpoints that may be mounted in other // namespaces on RHEL based kernels starting from RHEL 7.4. // Without this setting, removals on these RHEL based kernels may fail with // "device or resource busy". // This setting is not available in upstream kernels as it is not configurable, // but has been in the upstream kernels since 3.15. func setMayDetachMounts() error { f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0) if err != nil { if os.IsNotExist(err) { return nil } return errors.Wrap(err, "error opening may_detach_mounts kernel config file") } defer f.Close() _, err = f.WriteString("1") if os.IsPermission(err) { // Setting may_detach_mounts does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") } return nil } return err } func setupOOMScoreAdj(score int) error { if score == 0 { return nil } f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) if err != nil { return err } defer f.Close() stringScore := strconv.Itoa(score) _, err = f.WriteString(stringScore) if os.IsPermission(err) { // Setting oom_score_adj does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) } return nil } return err } func (daemon *Daemon) initCPURtController(mnt, path string) error { if path == "/" || path == "." { return nil } // Recursively create cgroup to ensure that the system and all parent cgroups have values set // for the period and runtime as this limits what the children can be set to. 
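// For example (hypothetical cgroup path): for "foo/bar" the recursion below first configures "foo",
// and only then writes cpu.rt_period_us and cpu.rt_runtime_us for "foo/bar".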
if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil { return err } path = filepath.Join(mnt, path) if err := os.MkdirAll(path, 0755); err != nil { return err } if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { return err } return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) } func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error { if configValue == 0 { return nil } return os.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700) } func (daemon *Daemon) setupSeccompProfile() error { switch profile := daemon.configStore.SeccompProfile; profile { case "", config.SeccompProfileDefault: daemon.seccompProfilePath = config.SeccompProfileDefault case config.SeccompProfileUnconfined: daemon.seccompProfilePath = config.SeccompProfileUnconfined default: daemon.seccompProfilePath = profile b, err := os.ReadFile(profile) if err != nil { return fmt.Errorf("opening seccomp profile (%s) failed: %v", profile, err) } daemon.seccompProfile = b } return nil } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { var siOpts []sysinfo.Opt if daemon.getCgroupDriver() == cgroupSystemdDriver { if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" { siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice")) } } return sysinfo.New(siOpts...) } func recursiveUnmount(target string) error { return mount.RecursiveUnmount(target) } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) return err }
//go:build linux || freebsd // +build linux freebsd package daemon // import "github.com/docker/docker/daemon" import ( "bufio" "context" "fmt" "net" "os" "path/filepath" "runtime" "runtime/debug" "strconv" "strings" "sync" "time" "github.com/containerd/cgroups" statsV1 "github.com/containerd/cgroups/stats/v1" statsV2 "github.com/containerd/cgroups/v2/stats" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/blkiodev" pblkiodev "github.com/docker/docker/api/types/blkiodev" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/drivers/bridge" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/options" lntypes "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" volumemounts "github.com/docker/docker/volume/mounts" "github.com/moby/sys/mount" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) const ( isWindows = false // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 linuxMinCPUShares = 2 linuxMaxCPUShares = 262144 platformSupported = true // It's not kernel limit, we want this 6M limit to account for overhead during startup, and to supply a reasonable functional container linuxMinMemory = 6291456 // constants for remapped root settings defaultIDSpecifier = "default" defaultRemappedID = "dockremap" // constant for cgroup drivers cgroupFsDriver = "cgroupfs" cgroupSystemdDriver = "systemd" cgroupNoneDriver = "none" ) type containerGetter interface { GetContainer(string) (*container.Container, error) } func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { memory := specs.LinuxMemory{} if config.Memory > 0 { memory.Limit = &config.Memory } if config.MemoryReservation > 0 { memory.Reservation = &config.MemoryReservation } if config.MemorySwap > 0 { memory.Swap = &config.MemorySwap } if config.MemorySwappiness != nil { swappiness := uint64(*config.MemorySwappiness) memory.Swappiness = &swappiness } if config.OomKillDisable != nil { memory.DisableOOMKiller = config.OomKillDisable } if config.KernelMemory != 0 { memory.Kernel = &config.KernelMemory } if config.KernelMemoryTCP != 0 { memory.KernelTCP = &config.KernelMemoryTCP } return &memory } func getPidsLimit(config containertypes.Resources) *specs.LinuxPids { if config.PidsLimit == nil { return nil } if *config.PidsLimit <= 0 { // docker API allows 0 and negative values to unset this to be consistent // with default values. When updating values, runc requires -1 to unset // the previous limit. 
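// For example, `--pids-limit=0` and `--pids-limit=-1` both take this branch and are
// forwarded to the runtime as Limit: -1 (no limit).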
return &specs.LinuxPids{Limit: -1} } return &specs.LinuxPids{Limit: *config.PidsLimit} } func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { cpu := specs.LinuxCPU{} if config.CPUShares < 0 { return nil, fmt.Errorf("shares: invalid argument") } if config.CPUShares >= 0 { shares := uint64(config.CPUShares) cpu.Shares = &shares } if config.CpusetCpus != "" { cpu.Cpus = config.CpusetCpus } if config.CpusetMems != "" { cpu.Mems = config.CpusetMems } if config.NanoCPUs > 0 { // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt period := uint64(100 * time.Millisecond / time.Microsecond) quota := config.NanoCPUs * int64(period) / 1e9 cpu.Period = &period cpu.Quota = &quota } if config.CPUPeriod != 0 { period := uint64(config.CPUPeriod) cpu.Period = &period } if config.CPUQuota != 0 { q := config.CPUQuota cpu.Quota = &q } if config.CPURealtimePeriod != 0 { period := uint64(config.CPURealtimePeriod) cpu.RealtimePeriod = &period } if config.CPURealtimeRuntime != 0 { c := config.CPURealtimeRuntime cpu.RealtimeRuntime = &c } return &cpu, nil } func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { var stat unix.Stat_t var blkioWeightDevices []specs.LinuxWeightDevice for _, weightDevice := range config.BlkioWeightDevice { if err := unix.Stat(weightDevice.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: weightDevice.Path, Err: err}) } weight := weightDevice.Weight d := specs.LinuxWeightDevice{Weight: &weight} // The type is 32bit on mips. d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert blkioWeightDevices = append(blkioWeightDevices, d) } return blkioWeightDevices, nil } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { container.NoNewPrivileges = daemon.configStore.NoNewPrivileges return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { var ( labelOpts []string err error ) for _, opt := range config.SecurityOpt { if opt == "no-new-privileges" { container.NoNewPrivileges = true continue } if opt == "disable" { labelOpts = append(labelOpts, "disable") continue } var con []string if strings.Contains(opt, "=") { con = strings.SplitN(opt, "=", 2) } else if strings.Contains(opt, ":") { con = strings.SplitN(opt, ":", 2) logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") } if len(con) != 2 { return fmt.Errorf("invalid --security-opt 1: %q", opt) } switch con[0] { case "label": labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] case "seccomp": container.SeccompProfile = con[1] case "no-new-privileges": noNewPrivileges, err := strconv.ParseBool(con[1]) if err != nil { return fmt.Errorf("invalid --security-opt 2: %q", opt) } container.NoNewPrivileges = noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } } container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { var throttleDevices []specs.LinuxThrottleDevice var stat unix.Stat_t for _, d := range devs { if err := unix.Stat(d.Path, &stat); err != nil { return nil, errors.WithStack(&os.PathError{Op: "stat", Path: d.Path, 
Err: err}) } d := specs.LinuxThrottleDevice{Rate: d.Rate} // the type is 32bit on mips d.Major = int64(unix.Major(uint64(stat.Rdev))) //nolint: unconvert d.Minor = int64(unix.Minor(uint64(stat.Rdev))) //nolint: unconvert throttleDevices = append(throttleDevices, d) } return throttleDevices, nil } // adjustParallelLimit takes a number of objects and a proposed limit and // figures out if it's reasonable (and adjusts it accordingly). This is only // used for daemon startup, which does a lot of parallel loading of containers // (and if we exceed RLIMIT_NOFILE then we're in trouble). func adjustParallelLimit(n int, limit int) int { // Rule-of-thumb overhead factor (how many files will each goroutine open // simultaneously). Yes, this is ugly but to be frank this whole thing is // ugly. const overhead = 2 // On Linux, we need to ensure that parallelStartupJobs doesn't cause us to // exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it // and give a warning (since in theory the user should increase their // ulimits to the largest possible value for dockerd). var rlim unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err) return limit } softRlimit := int(rlim.Cur) // Much fewer containers than RLIMIT_NOFILE. No need to adjust anything. if softRlimit > overhead*n { return limit } // RLIMIT_NOFILE big enough, no need to adjust anything. if softRlimit > overhead*limit { return limit } logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit) return softRlimit / overhead } func checkKernel() error { // Check for unsupported kernel versions // FIXME: it would be cleaner to not test for specific versions, but rather // test for specific functionalities. // Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.10 crashes are clearer. // For details see https://github.com/docker/docker/issues/407 // Docker 1.11 and above doesn't actually run on kernels older than 3.4, // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). if !kernel.CheckKernelVersion(3, 10, 0) { v, _ := kernel.GetKernelVersion() if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) } } return nil } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if adjustCPUShares && hostConfig.CPUShares > 0 { // Handle unsupported CPUShares if hostConfig.CPUShares < linuxMinCPUShares { logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) hostConfig.CPUShares = linuxMinCPUShares } else if hostConfig.CPUShares > linuxMaxCPUShares { logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) hostConfig.CPUShares = linuxMaxCPUShares } } if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { // By default, MemorySwap is set to twice the size of Memory. 
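// For example (illustrative values): `-m 512m` with no `--memory-swap` results in
// MemorySwap = 1073741824 (512 MiB * 2).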
hostConfig.MemorySwap = hostConfig.Memory * 2 } if hostConfig.ShmSize == 0 { hostConfig.ShmSize = config.DefaultShmSize if daemon.configStore != nil { hostConfig.ShmSize = int64(daemon.configStore.ShmSize) } } // Set default IPC mode, if unset for container if hostConfig.IpcMode.IsEmpty() { m := config.DefaultIpcMode if daemon.configStore != nil { m = containertypes.IpcMode(daemon.configStore.IpcMode) } hostConfig.IpcMode = m } // Set default cgroup namespace mode, if unset for container if hostConfig.CgroupnsMode.IsEmpty() { // for cgroup v2: unshare cgroupns even for privileged containers // https://github.com/containers/libpod/pull/4374#issuecomment-549776387 if hostConfig.Privileged && cgroups.Mode() != cgroups.Unified { hostConfig.CgroupnsMode = containertypes.CgroupnsModeHost } else { m := containertypes.CgroupnsModeHost if cgroups.Mode() == cgroups.Unified { m = containertypes.CgroupnsModePrivate } if daemon.configStore != nil { m = containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode) } hostConfig.CgroupnsMode = m } } adaptSharedNamespaceContainer(daemon, hostConfig) var err error secOpts, err := daemon.generateSecurityOpt(hostConfig) if err != nil { return err } hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...) if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable } return nil } // adaptSharedNamespaceContainer replaces container name with its ID in hostConfig. // More precisely, it rewrites `container:name` to `container:ID` in PidMode, IpcMode // and NetworkMode. // // When a container shares its namespace with another container, using the ID keeps the namespace- // sharing connection between the two containers intact even if the other container is renamed. func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) { containerPrefix := "container:" if hostConfig.PidMode.IsContainer() { pidContainer := hostConfig.PidMode.Container() // if there is any error returned here, we just ignore it and leave it to be // handled in the following logic if c, err := daemon.GetContainer(pidContainer); err == nil { hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID) } } if hostConfig.IpcMode.IsContainer() { ipcContainer := hostConfig.IpcMode.Container() if c, err := daemon.GetContainer(ipcContainer); err == nil { hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID) } } if hostConfig.NetworkMode.IsContainer() { netContainer := hostConfig.NetworkMode.ConnectedContainer() if c, err := daemon.GetContainer(netContainer); err == nil { hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID) } } } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) { fixMemorySwappiness(resources) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory limit allowed is 6MB") } if resources.Memory > 0 && !sysInfo.MemoryLimit { warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") resources.Memory = 0 resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") } if resources.Memory == 0 && resources.MemorySwap > 0 && !update { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") } if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness if swappiness < 0 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) } } if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.MemoryReservation = 0 } if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory reservation allowed is 6MB") } if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") } if resources.KernelMemory > 0 { // Kernel memory limit is not supported on cgroup v2. // Even on cgroup v1, kernel memory limit (`kmem.limit_in_bytes`) has been deprecated since kernel 5.4. // https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7 warnings = append(warnings, "Specifying a kernel memory limit is deprecated and will be removed in a future release.") } if resources.KernelMemory > 0 && !sysInfo.KernelMemory { warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") resources.KernelMemory = 0 } if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") } if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") } if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point // warning the caller if they already wanted the feature to be off if *resources.OomKillDisable { warnings = append(warnings, "Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") } resources.OomKillDisable = nil } if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 { warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.") } if resources.PidsLimit != nil && !sysInfo.PidsLimit { if *resources.PidsLimit > 0 { warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.") } resources.PidsLimit = nil } // cpu subsystem checks and adjustments if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") } if resources.NanoCPUs > 0 && !sysInfo.CPUCfs { return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU CFS scheduler or the cgroup is not mounted") } // The highest precision we could get on Linux is 0.001, by setting // cpu.cfs_period_us=1000ms // cpu.cfs_quota=1ms // See the following link for details: // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. // The error message is 0.01 so that this is consistent with Windows if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.CPUShares > 0 && !sysInfo.CPUShares { warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") resources.CPUShares = 0 } if (resources.CPUPeriod != 0 || resources.CPUQuota != 0) && !sysInfo.CPUCfs { warnings = append(warnings, "Your kernel does not support CPU CFS scheduler. CPU period/quota discarded.") resources.CPUPeriod = 0 resources.CPUQuota = 0 } if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") } if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") } if resources.CPUPercent > 0 { warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) resources.CPUPercent = 0 } // cpuset subsystem checks and adjustments if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. 
Cpuset discarded.") resources.CpusetCpus = "" resources.CpusetMems = "" } cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus) } if !cpusAvailable { return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) } memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) if err != nil { return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems) } if !memsAvailable { return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) } // blkio subsystem checks and adjustments if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") resources.BlkioWeight = 0 } if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") } if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) } if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} } if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} } return warnings, nil } func (daemon *Daemon) getCgroupDriver() string { if UsingSystemd(daemon.configStore) { return cgroupSystemdDriver } if daemon.Rootless() { return cgroupNoneDriver } return cgroupFsDriver } // getCD gets the raw value of the native.cgroupdriver option, if set. 
func getCD(config *config.Config) string { for _, option := range config.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { continue } return val } return "" } // verifyCgroupDriver validates native.cgroupdriver func verifyCgroupDriver(config *config.Config) error { cd := getCD(config) if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { return nil } if cd == cgroupNoneDriver { return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd) } return fmt.Errorf("native.cgroupdriver option %s not supported", cd) } // UsingSystemd returns true if cli option includes native.cgroupdriver=systemd func UsingSystemd(config *config.Config) bool { cd := getCD(config) if cd == cgroupSystemdDriver { return true } // On cgroup v2 hosts, default to systemd driver if cd == "" && cgroups.Mode() == cgroups.Unified && isRunningSystemd() { return true } return false } var ( runningSystemd bool detectSystemd sync.Once ) // isRunningSystemd checks whether the host was booted with systemd as its init // system. This functions similarly to systemd's `sd_booted(3)`: internally, it // checks whether /run/systemd/system/ exists and is a directory. // http://www.freedesktop.org/software/systemd/man/sd_booted.html // // NOTE: This function comes from package github.com/coreos/go-systemd/util // It was borrowed here to avoid a dependency on cgo. func isRunningSystemd() bool { detectSystemd.Do(func() { fi, err := os.Lstat("/run/systemd/system") if err != nil { return } runningSystemd = fi.IsDir() }) return runningSystemd } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } sysInfo := daemon.RawSysInfo() w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update) // no matter err is nil or not, w could have data in itself. warnings = append(warnings, w...) if err != nil { return warnings, err } if hostConfig.ShmSize < 0 { return warnings, fmt.Errorf("SHM size can not be less than 0") } if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) } // ip-forwarding does not affect container with '--net=host' (or '--net=none') if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") } if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 { warnings = append(warnings, "Published ports are discarded when using host network mode") } // check for various conflicting options with user namespaces if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { if hostConfig.Privileged { return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. 
You must run the container in the host namespace when running privileged mode") } if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled") } if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled") } } if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { // CgroupParent for systemd cgroup should be named as "xxx.slice" if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if hostConfig.Runtime == "" { hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() } if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) } parser := volumemounts.NewParser() for dest := range hostConfig.Tmpfs { if err := parser.ValidateTmpfsMountDestination(dest); err != nil { return warnings, err } } if !hostConfig.CgroupnsMode.Valid() { return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode) } if hostConfig.CgroupnsMode.IsPrivate() { if !sysInfo.CgroupNamespaces { warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.") } } if hostConfig.Runtime == config.LinuxV1RuntimeName || (hostConfig.Runtime == "" && daemon.configStore.DefaultRuntime == config.LinuxV1RuntimeName) { warnings = append(warnings, fmt.Sprintf("Configured runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName)) } return warnings, nil } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(conf *config.Config) error { if conf.ContainerdNamespace == conf.ContainerdPluginNamespace { return errors.New("containers namespace and plugins namespace cannot be the same") } // Check for mutually incompatible config options if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") } if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") } if conf.BridgeConfig.EnableIP6Tables && !conf.Experimental { return fmt.Errorf("ip6tables rules are only available if experimental features are enabled") } if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { conf.BridgeConfig.EnableIPMasq = false } if err := verifyCgroupDriver(conf); err != nil { return err } if conf.CgroupParent != "" && UsingSystemd(conf) { if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified { return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode") } configureRuntimes(conf) if rtName := conf.GetDefaultRuntimeName(); rtName != "" { if conf.GetRuntime(rtName) == nil { return fmt.Errorf("specified default runtime '%s' does not exist", rtName) } if rtName == config.LinuxV1RuntimeName { logrus.Warnf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName) } } return nil } // checkSystem validates platform-specific requirements func checkSystem() error { return checkKernel() } // configureMaxThreads sets the Go runtime max threads threshold // which is 90% of the kernel setting from /proc/sys/kernel/threads-max func configureMaxThreads(config *config.Config) error { mt, err := os.ReadFile("/proc/sys/kernel/threads-max") if err != nil { return err } mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) if err != nil { return err } maxThreads := (mtint / 100) * 90 debug.SetMaxThreads(maxThreads) logrus.Debugf("Golang's threads limit set to %d", maxThreads) return nil } func overlaySupportsSelinux() (bool, error) { f, err := os.Open("/proc/kallsyms") if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), " security_inode_copy_up") { return true, nil } } return false, s.Err() } // configureKernelSecuritySupport configures and validates security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { if config.EnableSelinuxSupport { if !selinux.GetEnabled() { logrus.Warn("Docker could not enable SELinux on the host system") return nil } if driverName == "overlay" || driverName == "overlay2" { // If driver is overlay or overlay2, make sure kernel // supports selinux with overlay. supported, err := overlaySupportsSelinux() if err != nil { return err } if !supported { logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) } } } else { selinux.SetDisabled() } return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } if len(activeSandboxes) > 0 { logrus.Info("There are old running containers, the network config will not take affect") setHostGatewayIP(daemon.configStore, controller) return controller, nil } // Initialize default network on "null" if n, _ := controller.NetworkByName("none"); n == nil { if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) } } // Initialize default network on "host" if n, _ := controller.NetworkByName("host"); n == nil { if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) } } // Clear stale bridge network if n, err := controller.NetworkByName("bridge"); err == nil { if err = n.Delete(); err != nil { return nil, fmt.Errorf("could not delete the default bridge network: %v", err) } if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled { removeDefaultBridgeInterface() } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } else { removeDefaultBridgeInterface() } // Set HostGatewayIP to the default bridge's IP if it is empty setHostGatewayIP(daemon.configStore, controller) return controller, nil } // setHostGatewayIP sets cfg.HostGatewayIP to the default bridge's IP if it is empty. func setHostGatewayIP(config *config.Config, controller libnetwork.NetworkController) { if config.HostGatewayIP != nil { return } if n, err := controller.NetworkByName("bridge"); err == nil { v4Info, v6Info := n.Info().IpamInfo() var gateway net.IP if len(v4Info) > 0 { gateway = v4Info[0].Gateway.IP } else if len(v6Info) > 0 { gateway = v6Info[0].Gateway.IP } config.HostGatewayIP = gateway } } func driverOptions(config *config.Config) nwconfig.Option { return nwconfig.OptionDriverConfig("bridge", options.Generic{ netlabel.GenericData: options.Generic{ "EnableIPForwarding": config.BridgeConfig.EnableIPForward, "EnableIPTables": config.BridgeConfig.EnableIPTables, "EnableIP6Tables": config.BridgeConfig.EnableIP6Tables, "EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy, "UserlandProxyPath": config.BridgeConfig.UserlandProxyPath, }, }) } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { bridgeName := bridge.DefaultBridgeName if config.BridgeConfig.Iface != "" { bridgeName = config.BridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq), bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication), } // --ip processing if config.BridgeConfig.DefaultIP != nil { netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String() } ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) if err != nil { return errors.Wrap(err, "list bridge addresses failed") } nw := nwList[0] if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return errors.Wrap(err, "parse CIDR 
failed") } // Iterate through in case there are multiple addresses for the bridge for _, entry := range nwList { if fCIDR.Contains(entry.IP) { nw = entry break } } } ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) if hip.IsGlobalUnicast() { ipamV4Conf.Gateway = nw.IP.String() } if config.BridgeConfig.IP != "" { ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP) if err != nil { return err } ipamV4Conf.PreferredPool = ipNet.String() ipamV4Conf.Gateway = ip.String() } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.BridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return err } ipamV4Conf.SubPool = fCIDR.String() } if config.BridgeConfig.DefaultGatewayIPv4 != nil { ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() } var ( deferIPv6Alloc bool ipamV6Conf *libnetwork.IpamConf ) if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" { return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6")) } else if config.BridgeConfig.FixedCIDRv6 != "" { _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) if err != nil { return err } // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has // at least 48 host bits, we need to guarantee the current behavior where the containers' // IPv6 addresses will be constructed based on the containers' interface MAC address. // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints // on this network until after the driver has created the endpoint and returned the // constructed address. Libnetwork will then reserve this address with the ipam driver. 
ones, _ := fCIDRv6.Mask.Size() deferIPv6Alloc = ones <= 80 ipamV6Conf = &libnetwork.IpamConf{ AuxAddresses: make(map[string]string), PreferredPool: fCIDRv6.String(), } // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 // address belongs to the same network, we need to inform libnetwork about it, so // that it can be reserved with IPAM and it will not be given away to somebody else for _, nw6 := range nw6List { if fCIDRv6.Contains(nw6.IP) { ipamV6Conf.Gateway = nw6.IP.String() break } } } if config.BridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} if ipamV6Conf != nil { v6Conf = append(v6Conf, ipamV6Conf) } // Initialize default network on "bridge" with the same name _, err = controller.NewNetwork("bridge", "bridge", "", libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), libnetwork.NetworkOptionDriverOpts(netOption), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) if err != nil { return fmt.Errorf("Error creating default \"bridge\" network: %v", err) } return nil } // Remove default bridge interface if present (--bridge=none use case) func removeDefaultBridgeInterface() { if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { if err := netlink.LinkDel(lnk); err != nil { logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) } } } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return func(initPath containerfs.ContainerFS) error { return initlayer.Setup(initPath, idMapping.RootPair()) } } // Parse the remapped root (user namespace) option, which can be one of: // username - valid username from /etc/passwd // username:groupname - valid username; valid groupname from /etc/group // uid - 32-bit unsigned int valid Linux UID value // uid:gid - uid value; 32-bit unsigned int Linux GID value // // If no groupname is specified, and a username is specified, an attempt // will be made to lookup a gid for that username as a groupname // // If names are used, they are verified to exist in passwd/group func parseRemappedRoot(usergrp string) (string, string, error) { var ( userID, groupID int username, groupname string ) idparts := strings.Split(usergrp, ":") if len(idparts) > 2 { return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) } if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { // must be a uid; take it as valid userID = int(uid) luser, err := idtools.LookupUID(userID) if err != nil { return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) } username = luser.Name if len(idparts) == 1 { // if the uid was numeric and no gid was specified, take the uid as the gid groupID = userID lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) } groupname = lgrp.Name } } else { lookupName := idparts[0] // special case: if the user specified "default", they want Docker to create or // use (after creation) the "dockremap" user/group for root remapping if lookupName == defaultIDSpecifier { lookupName = defaultRemappedID } luser, err := idtools.LookupUser(lookupName) if err != 
nil && idparts[0] != defaultIDSpecifier { // error if the name requested isn't the special "dockremap" ID return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) } else if err != nil { // special case-- if the username == "default", then we have been asked // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} // ranges will be used for the user and group mappings in user namespaced containers _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) if err == nil { return defaultRemappedID, defaultRemappedID, nil } return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) } username = luser.Name if len(idparts) == 1 { // we only have a string username, and no group specified; look up gid from username as group group, err := idtools.LookupGroup(lookupName) if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) } groupname = group.Name } } if len(idparts) == 2 { // groupname or gid is separately specified and must be resolved // to an unsigned 32-bit gid if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { // must be a gid, take it as valid groupID = int(gid) lgrp, err := idtools.LookupGID(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) } groupname = lgrp.Name } else { // not a number; attempt a lookup if _, err := idtools.LookupGroup(idparts[1]); err != nil { return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) } groupname = idparts[1] } } return username, groupname, nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { return nil, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { return nil, err } if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") return &idtools.IdentityMapping{}, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) mappings, err := idtools.NewIdentityMapping(username) if err != nil { return nil, errors.Wrap(err, "Can't create ID mappings") } return mappings, nil } return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools.Identity) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root // (e.g. mounted layers of a container) can traverse this path. // The user namespace support will create subdirectories for the remapped root host uid:gid // pair owned by that same uid:gid pair for proper write access to those needed metadata and // layer content subtrees. 
if _, err := os.Stat(rootDir); err == nil { // root current exists; verify the access bits are correct by setting them if err = os.Chmod(rootDir, 0711); err != nil { return err } } else if os.IsNotExist(err) { // no root exists yet, create it 0711 with root:root ownership if err := os.MkdirAll(rootDir, 0711); err != nil { return err } } id := idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: remappedRoot.GID} // First make sure the current root dir has the correct perms. if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root) } // if user namespaces are enabled we will create a subtree underneath the specified root // with any/all specified remapped root uid/gid options on the daemon creating // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } // we also need to verify that any pre-existing directories in the path to // the graphroot won't block access to remapped root--if any pre-existing directory // has strict permissions that don't allow "x", container start will fail, so // better to warn and fail now dirPath := config.Root for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if !idtools.CanAccess(dirPath, remappedRoot) { return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root) } } } if err := setupDaemonRootPropagation(config); err != nil { logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") } return nil } func setupDaemonRootPropagation(cfg *config.Config) error { rootParentMount, mountOptions, err := getSourceMount(cfg.Root) if err != nil { return errors.Wrap(err, "error getting daemon root's parent mount") } var cleanupOldFile bool cleanupFile := getUnmountOnShutdownPath(cfg) defer func() { if !cleanupOldFile { return } if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) { logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") } }() if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) { cleanupOldFile = true return nil } if err := mount.MakeShared(cfg.Root); err != nil { return errors.Wrap(err, "could not setup daemon root propagation to shared") } // check the case where this may have already been a mount to itself. // If so then the daemon only performed a remount and should not try to unmount this later. 
if rootParentMount == cfg.Root { cleanupOldFile = true return nil } if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil { return errors.Wrap(err, "error creating dir to store mount cleanup file") } if err := os.WriteFile(cleanupFile, nil, 0600); err != nil { return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown") } return nil } // getUnmountOnShutdownPath generates the path to used when writing the file that signals to the daemon that on shutdown // the daemon root should be unmounted. func getUnmountOnShutdownPath(config *config.Config) string { return filepath.Join(config.ExecRoot, "unmount-on-shutdown") } // registerLinks writes the links to a file. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { return nil } for _, l := range hostConfig.Links { name, alias, err := opts.ParseLink(l) if err != nil { return err } child, err := daemon.GetContainer(name) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "could not get container for %s", name) } for child.HostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) child, err = daemon.GetContainer(parts[1]) if err != nil { if errdefs.IsNotFound(err) { // Trying to link to a non-existing container is not valid, and // should return an "invalid parameter" error. Returning a "not // found" error here would make the client report the container's // image could not be found (see moby/moby#39823) err = errdefs.InvalidParameter(err) } return errors.Wrapf(err, "Could not get container for %s", parts[1]) } } if child.HostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } if err := daemon.registerLink(container, child, alias); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig _, err := container.WriteHostConfig() return err } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { return daemon.Unmount(container) } func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry { out := make([]types.BlkioStatEntry, len(entries)) for i, re := range entries { out[i] = types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: re.Op, Value: re.Value, } } return out } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } cs, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } s := &types.StatsJSON{} s.Read = cs.Read stats := cs.Metrics switch t := stats.(type) { case *statsV1.Metrics: return daemon.statsV1(s, t) case *statsV2.Metrics: return daemon.statsV2(s, t) default: return nil, errors.Errorf("unexpected type of metrics %+v", t) } } func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) { if stats.Blkio != nil { s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive), IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive), IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive), IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive), IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive), IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive), IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive), SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive), } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.Usage.Total, PercpuUsage: stats.CPU.Usage.PerCPU, UsageInKernelmode: stats.CPU.Usage.Kernel, UsageInUsermode: stats.CPU.Usage.User, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.Throttling.Periods, ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods, ThrottledTime: stats.CPU.Throttling.ThrottledTime, }, } } if stats.Memory != nil { raw := map[string]uint64{ "cache": stats.Memory.Cache, "rss": stats.Memory.RSS, "rss_huge": stats.Memory.RSSHuge, "mapped_file": stats.Memory.MappedFile, "dirty": stats.Memory.Dirty, "writeback": stats.Memory.Writeback, "pgpgin": stats.Memory.PgPgIn, "pgpgout": stats.Memory.PgPgOut, "pgfault": stats.Memory.PgFault, "pgmajfault": stats.Memory.PgMajFault, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "hierarchical_memory_limit": stats.Memory.HierarchicalMemoryLimit, "hierarchical_memsw_limit": stats.Memory.HierarchicalSwapLimit, "total_cache": stats.Memory.TotalCache, "total_rss": stats.Memory.TotalRSS, "total_rss_huge": stats.Memory.TotalRSSHuge, "total_mapped_file": stats.Memory.TotalMappedFile, "total_dirty": stats.Memory.TotalDirty, "total_writeback": stats.Memory.TotalWriteback, "total_pgpgin": stats.Memory.TotalPgPgIn, "total_pgpgout": stats.Memory.TotalPgPgOut, "total_pgfault": stats.Memory.TotalPgFault, "total_pgmajfault": stats.Memory.TotalPgMajFault, "total_inactive_anon": stats.Memory.TotalInactiveAnon, "total_active_anon": stats.Memory.TotalActiveAnon, "total_inactive_file": stats.Memory.TotalInactiveFile, "total_active_file": stats.Memory.TotalActiveFile, "total_unevictable": stats.Memory.TotalUnevictable, } if 
stats.Memory.Usage != nil { s.MemoryStats = types.MemoryStats{ Stats: raw, Usage: stats.Memory.Usage.Usage, MaxUsage: stats.Memory.Usage.Max, Limit: stats.Memory.Usage.Limit, Failcnt: stats.Memory.Usage.Failcnt, } } else { s.MemoryStats = types.MemoryStats{ Stats: raw, } } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) { if stats.Io != nil { var isbr []types.BlkioStatEntry for _, re := range stats.Io.Usage { isbr = append(isbr, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "read", Value: re.Rbytes, }, types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: "write", Value: re.Wbytes, }, ) } s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: isbr, // Other fields are unsupported } } if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: stats.CPU.UsageUsec * 1000, // PercpuUsage is not supported UsageInKernelmode: stats.CPU.SystemUsec * 1000, UsageInUsermode: stats.CPU.UserUsec * 1000, }, ThrottlingData: types.ThrottlingData{ Periods: stats.CPU.NrPeriods, ThrottledPeriods: stats.CPU.NrThrottled, ThrottledTime: stats.CPU.ThrottledUsec * 1000, }, } } if stats.Memory != nil { s.MemoryStats = types.MemoryStats{ // Stats is not compatible with v1 Stats: map[string]uint64{ "anon": stats.Memory.Anon, "file": stats.Memory.File, "kernel_stack": stats.Memory.KernelStack, "slab": stats.Memory.Slab, "sock": stats.Memory.Sock, "shmem": stats.Memory.Shmem, "file_mapped": stats.Memory.FileMapped, "file_dirty": stats.Memory.FileDirty, "file_writeback": stats.Memory.FileWriteback, "anon_thp": stats.Memory.AnonThp, "inactive_anon": stats.Memory.InactiveAnon, "active_anon": stats.Memory.ActiveAnon, "inactive_file": stats.Memory.InactiveFile, "active_file": stats.Memory.ActiveFile, "unevictable": stats.Memory.Unevictable, "slab_reclaimable": stats.Memory.SlabReclaimable, "slab_unreclaimable": stats.Memory.SlabUnreclaimable, "pgfault": stats.Memory.Pgfault, "pgmajfault": stats.Memory.Pgmajfault, "workingset_refault": stats.Memory.WorkingsetRefault, "workingset_activate": stats.Memory.WorkingsetActivate, "workingset_nodereclaim": stats.Memory.WorkingsetNodereclaim, "pgrefill": stats.Memory.Pgrefill, "pgscan": stats.Memory.Pgscan, "pgsteal": stats.Memory.Pgsteal, "pgactivate": stats.Memory.Pgactivate, "pgdeactivate": stats.Memory.Pgdeactivate, "pglazyfree": stats.Memory.Pglazyfree, "pglazyfreed": stats.Memory.Pglazyfreed, "thp_fault_alloc": stats.Memory.ThpFaultAlloc, "thp_collapse_alloc": stats.Memory.ThpCollapseAlloc, }, Usage: stats.Memory.Usage, // MaxUsage is not supported Limit: stats.Memory.UsageLimit, } // if the container does not set memory limit, use the machineMemory if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { s.MemoryStats.Limit = daemon.machineMemory } if stats.MemoryEvents != nil { // Failcnt is set to the "oom" field of the "memory.events" file. 
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html s.MemoryStats.Failcnt = stats.MemoryEvents.Oom } } if stats.Pids != nil { s.PidsStats = types.PidsStats{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, } } return s, nil } // setDefaultIsolation determines the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { return nil } // setupDaemonProcess sets various settings for the daemon's process func setupDaemonProcess(config *config.Config) error { // setup the daemons oom_score_adj if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil { return err } if err := setMayDetachMounts(); err != nil { logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter") } return nil } // This is used to allow removal of mountpoints that may be mounted in other // namespaces on RHEL based kernels starting from RHEL 7.4. // Without this setting, removals on these RHEL based kernels may fail with // "device or resource busy". // This setting is not available in upstream kernels as it is not configurable, // but has been in the upstream kernels since 3.15. func setMayDetachMounts() error { f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0) if err != nil { if os.IsNotExist(err) { return nil } return errors.Wrap(err, "error opening may_detach_mounts kernel config file") } defer f.Close() _, err = f.WriteString("1") if os.IsPermission(err) { // Setting may_detach_mounts does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") } return nil } return err } func setupOOMScoreAdj(score int) error { if score == 0 { return nil } f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) if err != nil { return err } defer f.Close() stringScore := strconv.Itoa(score) _, err = f.WriteString(stringScore) if os.IsPermission(err) { // Setting oom_score_adj does not work in an // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. if !userns.RunningInUserNS() { logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) } return nil } return err } func (daemon *Daemon) initCPURtController(mnt, path string) error { if path == "/" || path == "." { return nil } // Recursively create cgroup to ensure that the system and all parent cgroups have values set // for the period and runtime as this limits what the children can be set to. 
if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil { return err } path = filepath.Join(mnt, path) if err := os.MkdirAll(path, 0755); err != nil { return err } if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { return err } return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) } func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error { if configValue == 0 { return nil } return os.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700) } func (daemon *Daemon) setupSeccompProfile() error { switch profile := daemon.configStore.SeccompProfile; profile { case "", config.SeccompProfileDefault: daemon.seccompProfilePath = config.SeccompProfileDefault case config.SeccompProfileUnconfined: daemon.seccompProfilePath = config.SeccompProfileUnconfined default: daemon.seccompProfilePath = profile b, err := os.ReadFile(profile) if err != nil { return fmt.Errorf("opening seccomp profile (%s) failed: %v", profile, err) } daemon.seccompProfile = b } return nil } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { var siOpts []sysinfo.Opt if daemon.getCgroupDriver() == cgroupSystemdDriver { if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" { siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice")) } } return sysinfo.New(siOpts...) } func recursiveUnmount(target string) error { return mount.RecursiveUnmount(target) } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) return err }
sanchayanghosh
58e68789bf37b1484e773a3db99146ae36942295
40ccedd61b9d8b64fc75d64cd57b9ee35c769632
Not related to your change, but It's actually odd that we're passing `config` as argument to `initNetworkController()`, because it's already available through `daemon`. We should probably change that (in a follow-up PR) to _either_ remove `daemon` _or_ the argument; https://github.com/moby/moby/blob/9e7bbdb9ba53440cfc67e2e1de89e35e44c43ddf/daemon/daemon.go#L475
thaJeztah
4,484
moby/moby
42,782
Remove platform argument from Puller interface.
The `platform` argument is unneeded because `ImagePullConfig` is already available in the `v2Puller` struct.

**- What I did**
Remove the platform argument from the Puller interface.

**- How I did it**
The platform option was introduced in this [contribution](https://github.com/moby/moby/commit/0380fbff37922cadf294851b1546f4c212c7f364#diff-cd6eca1d1dbceb8e107577c74bf1760760204df66f0ff833c8ca35291e40cad0R24), but I don't think it's necessary; we can use `ImagePullConfig` directly.
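To make the refactor described above concrete, here is a small self-contained Go sketch of the pattern it applies: rather than threading a `platform` parameter through every `Pull` call, the puller reads the platform from the pull configuration it already holds (in the rewritten code this is `p.config.Platform` on `ImagePullConfig`). The type and function names below (`pullConfig`, `puller`, `pull`) are illustrative stand-ins, not the real moby identifiers:

```go
package main

import "fmt"

// pullConfig plays the role of ImagePullConfig: it already carries the
// requested platform, so callers no longer pass it separately.
type pullConfig struct {
	Platform string // simplified stand-in for *specs.Platform
}

// puller plays the role of v2Puller, which embeds its pull configuration.
type puller struct {
	config *pullConfig
}

// pull mirrors the new Pull(ctx, ref) shape: no platform parameter,
// because the platform comes from the config the puller owns.
func (p *puller) pull(ref string) error {
	fmt.Printf("pulling %s for platform %q\n", ref, p.config.Platform)
	return nil
}

func main() {
	p := &puller{config: &pullConfig{Platform: "linux/amd64"}}
	_ = p.pull("docker.io/library/alpine:latest")
}
```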
null
2021-08-25 04:06:26+00:00
2021-08-30 11:19:08+00:00
distribution/pull_v2.go
package distribution // import "github.com/docker/docker/distribution" import ( "context" "encoding/json" "fmt" "io" "os" "runtime" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/distribution" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/ocischema" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" v1 "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) var ( errRootFSMismatch = errors.New("layers from manifest don't match image configuration") errRootFSInvalid = errors.New("invalid rootfs in image configuration") ) // ImageConfigPullError is an error pulling the image config blob // (only applies to schema2). type ImageConfigPullError struct { Err error } // Error returns the error string for ImageConfigPullError. func (e ImageConfigPullError) Error() string { return "error pulling image configuration: " + e.Err.Error() } type v2Puller struct { V2MetadataService metadata.V2MetadataService endpoint registry.APIEndpoint config *ImagePullConfig repoInfo *registry.RepositoryInfo repo distribution.Repository manifestStore *manifestStore } func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) { // TODO(tiborvass): was ReceiveTimeout p.repo, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") if err != nil { logrus.Warnf("Error getting v2 registry: %v", err) return err } p.manifestStore.remote, err = p.repo.Manifests(ctx) if err != nil { return err } if err = p.pullV2Repository(ctx, ref, platform); err != nil { if _, ok := err.(fallbackError); ok { return err } if continueOnError(err, p.endpoint.Mirror) { return fallbackError{ err: err, transportOK: true, } } } return err } func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) { var layersDownloaded bool if !reference.IsNameOnly(ref) { layersDownloaded, err = p.pullV2Tag(ctx, ref, platform) if err != nil { return err } } else { tags, err := p.repo.Tags(ctx).All(ctx) if err != nil { return err } for _, tag := range tags { tagRef, err := reference.WithTag(ref, tag) if err != nil { return err } pulledNew, err := p.pullV2Tag(ctx, tagRef, platform) if err != nil { // Since this is the pull-all-tags case, don't // allow an error pulling a particular tag to // make the whole pull fall back to v1. if fallbackErr, ok := err.(fallbackError); ok { return fallbackErr.err } return err } // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
layersDownloaded = layersDownloaded || pulledNew } } writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) return nil } type v2LayerDescriptor struct { digest digest.Digest diffID layer.DiffID repoInfo *registry.RepositoryInfo repo distribution.Repository V2MetadataService metadata.V2MetadataService tmpFile *os.File verifier digest.Verifier src distribution.Descriptor } func (ld *v2LayerDescriptor) Key() string { return "v2:" + ld.digest.String() } func (ld *v2LayerDescriptor) ID() string { return stringid.TruncateID(ld.digest.String()) } func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { if ld.diffID != "" { return ld.diffID, nil } return ld.V2MetadataService.GetDiffID(ld.digest) } func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { logrus.Debugf("pulling blob %q", ld.digest) var ( err error offset int64 ) if ld.tmpFile == nil { ld.tmpFile, err = createDownloadFile() if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } else { offset, err = ld.tmpFile.Seek(0, io.SeekEnd) if err != nil { logrus.Debugf("error seeking to end of download file: %v", err) offset = 0 ld.tmpFile.Close() if err := os.Remove(ld.tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } ld.tmpFile, err = createDownloadFile() if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } else if offset != 0 { logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) } } tmpFile := ld.tmpFile layerDownload, err := ld.open(ctx) if err != nil { logrus.Errorf("Error initiating layer download: %v", err) return nil, 0, retryOnError(err) } if offset != 0 { _, err := layerDownload.Seek(offset, io.SeekStart) if err != nil { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } } size, err := layerDownload.Seek(0, io.SeekEnd) if err != nil { // Seek failed, perhaps because there was no Content-Length // header. This shouldn't fail the download, because we can // still continue without a progress bar. size = 0 } else { if size != 0 && offset > size { logrus.Debug("Partial download is larger than full blob. Starting over") offset = 0 if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } // Restore the seek offset either at the beginning of the // stream, or just after the last byte we have from previous // attempts. _, err = layerDownload.Seek(offset, io.SeekStart) if err != nil { return nil, 0, err } } reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") defer reader.Close() if ld.verifier == nil { ld.verifier = ld.digest.Verifier() } _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) if err != nil { if err == transport.ErrWrongCodeForByteRange { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } return nil, 0, retryOnError(err) } progress.Update(progressOutput, ld.ID(), "Verifying Checksum") if !ld.verifier.Verified() { err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) logrus.Error(err) // Allow a retry if this digest verification error happened // after a resumed download. 
if offset != 0 { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } return nil, 0, xfer.DoNotRetry{Err: err} } progress.Update(progressOutput, ld.ID(), "Download complete") logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) _, err = tmpFile.Seek(0, io.SeekStart) if err != nil { tmpFile.Close() if err := os.Remove(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } ld.tmpFile = nil ld.verifier = nil return nil, 0, xfer.DoNotRetry{Err: err} } // hand off the temporary file to the download manager, so it will only // be closed once ld.tmpFile = nil return ioutils.NewReadCloserWrapper(tmpFile, func() error { tmpFile.Close() err := os.RemoveAll(tmpFile.Name()) if err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } return err }), size, nil } func (ld *v2LayerDescriptor) Close() { if ld.tmpFile != nil { ld.tmpFile.Close() if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } } } func (ld *v2LayerDescriptor) truncateDownloadFile() error { // Need a new hash context since we will be redoing the download ld.verifier = nil if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil { logrus.Errorf("error seeking to beginning of download file: %v", err) return err } if err := ld.tmpFile.Truncate(0); err != nil { logrus.Errorf("error truncating download file: %v", err) return err } return nil } func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { // Cache mapping from this layer's DiffID to the blobsum ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) } func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) { var ( tagOrDigest string // Used for logging/progress only dgst digest.Digest mt string size int64 tagged reference.NamedTagged isTagged bool ) if digested, isDigested := ref.(reference.Canonical); isDigested { dgst = digested.Digest() tagOrDigest = digested.String() } else if tagged, isTagged = ref.(reference.NamedTagged); isTagged { tagService := p.repo.Tags(ctx) desc, err := tagService.Get(ctx, tagged.Tag()) if err != nil { return false, err } dgst = desc.Digest tagOrDigest = tagged.Tag() mt = desc.MediaType size = desc.Size } else { return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) } ctx = log.WithLogger(ctx, logrus.WithFields( logrus.Fields{ "digest": dgst, "remote": ref, })) desc := specs.Descriptor{ MediaType: mt, Digest: dgst, Size: size, } manifest, err := p.manifestStore.Get(ctx, desc) if err != nil { if isTagged && isNotFound(errors.Cause(err)) { logrus.WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag") msg := `%s Failed to pull manifest by the resolved digest. This registry does not appear to conform to the distribution registry specification; falling back to pull by tag. This fallback is DEPRECATED, and will be removed in a future release. Please contact admins of %s. %s ` warnEmoji := "\U000026A0\U0000FE0F" progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji) // Fetch by tag worked, but fetch by digest didn't. // This is a broken registry implementation. // We'll fallback to the old behavior and get the manifest by tag. 
var ms distribution.ManifestService ms, err = p.repo.Manifests(ctx) if err != nil { return false, err } manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag())) err = errors.Wrap(err, "error after falling back to get manifest by tag") } if err != nil { return false, err } } if manifest == nil { return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) } if m, ok := manifest.(*schema2.DeserializedManifest); ok { var allowedMediatype bool for _, t := range p.config.Schema2Types { if m.Manifest.Config.MediaType == t { allowedMediatype = true break } } if !allowedMediatype { configClass := mediaTypeClasses[m.Manifest.Config.MediaType] if configClass == "" { configClass = "unknown" } return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass} } } logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) var ( id digest.Digest manifestDigest digest.Digest ) switch v := manifest.(type) { case *schema1.SignedManifest: if p.config.RequireSchema2 { return false, fmt.Errorf("invalid manifest: not schema2") } // give registries time to upgrade to schema2 and only warn if we know a registry has been upgraded long time ago // TODO: condition to be removed if reference.Domain(ref) == "docker.io" { msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) logrus.Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) } id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform) if err != nil { return false, err } case *schema2.DeserializedManifest: id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform) if err != nil { return false, err } case *ocischema.DeserializedManifest: id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform) if err != nil { return false, err } case *manifestlist.DeserializedManifestList: id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform) if err != nil { return false, err } default: return false, invalidManifestFormatError{} } progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) if p.config.ReferenceStore != nil { oldTagID, err := p.config.ReferenceStore.Get(ref) if err == nil { if oldTagID == id { return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) } } else if err != refstore.ErrDoesNotExist { return false, err } if canonical, ok := ref.(reference.Canonical); ok { if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { return false, err } } else { if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { return false, err } if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { return false, err } } } return true, nil } func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { if platform != nil { // Early bath if the requested OS doesn't match that of the configuration. // This avoids doing the download, only to potentially fail later. 
if !system.IsOSSupported(platform.OS) { return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", runtime.GOOS, platform.OS) } } var verifiedManifest *schema1.Manifest verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) if err != nil { return "", "", err } rootFS := image.NewRootFS() // remove duplicate layers and check parent chain validity err = fixManifestLayers(verifiedManifest) if err != nil { return "", "", err } var descriptors []xfer.DownloadDescriptor // Image history converted to the new format var history []image.History // Note that the order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { blobSum := verifiedManifest.FSLayers[i].BlobSum if err = blobSum.Validate(); err != nil { return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum) } var throwAway struct { ThrowAway bool `json:"throwaway,omitempty"` } if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { return "", "", err } h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) if err != nil { return "", "", err } history = append(history, h) if throwAway.ThrowAway { continue } layerDescriptor := &v2LayerDescriptor{ digest: blobSum, repoInfo: p.repoInfo, repo: p.repo, V2MetadataService: p.V2MetadataService, } descriptors = append(descriptors, layerDescriptor) } resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, runtime.GOOS, descriptors, p.config.ProgressOutput) if err != nil { return "", "", err } defer release() config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) if err != nil { return "", "", err } imageID, err := p.config.ImageStore.Put(ctx, config) if err != nil { return "", "", err } manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) return imageID, manifestDigest, nil } func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) { if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil { // If the image already exists locally, no need to pull // anything. return target.Digest, nil } var descriptors []xfer.DownloadDescriptor // Note that the order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. 
for _, d := range layers { if err := d.Digest.Validate(); err != nil { return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest) } layerDescriptor := &v2LayerDescriptor{ digest: d.Digest, repo: p.repo, repoInfo: p.repoInfo, V2MetadataService: p.V2MetadataService, src: d, } descriptors = append(descriptors, layerDescriptor) } configChan := make(chan []byte, 1) configErrChan := make(chan error, 1) layerErrChan := make(chan error, 1) downloadsDone := make(chan struct{}) var cancel func() ctx, cancel = context.WithCancel(ctx) defer cancel() // Pull the image config go func() { configJSON, err := p.pullSchema2Config(ctx, target.Digest) if err != nil { configErrChan <- ImageConfigPullError{Err: err} cancel() return } configChan <- configJSON }() var ( configJSON []byte // raw serialized image config downloadedRootFS *image.RootFS // rootFS from registered layers configRootFS *image.RootFS // rootFS from configuration release func() // release resources from rootFS download configPlatform *specs.Platform // for LCOW when registering downloaded layers ) layerStoreOS := runtime.GOOS if platform != nil { layerStoreOS = platform.OS } // https://github.com/docker/docker/issues/24766 - Err on the side of caution, // explicitly blocking images intended for linux from the Windows daemon. On // Windows, we do this before the attempt to download, effectively serialising // the download slightly slowing it down. We have to do it this way, as // chances are the download of layers itself would fail due to file names // which aren't suitable for NTFS. At some point in the future, if a similar // check to block Windows images being pulled on Linux is implemented, it // may be necessary to perform the same type of serialisation. if runtime.GOOS == "windows" { configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err != nil { return "", err } if configRootFS == nil { return "", errRootFSInvalid } if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil { return "", err } if len(descriptors) != len(configRootFS.DiffIDs) { return "", errRootFSMismatch } if platform == nil { // Early bath if the requested OS doesn't match that of the configuration. // This avoids doing the download, only to potentially fail later. 
if !system.IsOSSupported(configPlatform.OS) { return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS) } layerStoreOS = configPlatform.OS } // Populate diff ids in descriptors to avoid downloading foreign layers // which have been side loaded for i := range descriptors { descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] } } if p.config.DownloadManager != nil { go func() { var ( err error rootFS image.RootFS ) downloadRootFS := *image.NewRootFS() rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput) if err != nil { // Intentionally do not cancel the config download here // as the error from config download (if there is one) // is more interesting than the layer download error layerErrChan <- err return } downloadedRootFS = &rootFS close(downloadsDone) }() } else { // We have nothing to download close(downloadsDone) } if configJSON == nil { configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err == nil && configRootFS == nil { err = errRootFSInvalid } if err != nil { cancel() select { case <-downloadsDone: case <-layerErrChan: } return "", err } } select { case <-downloadsDone: case err = <-layerErrChan: return "", err } if release != nil { defer release() } if downloadedRootFS != nil { // The DiffIDs returned in rootFS MUST match those in the config. // Otherwise the image config could be referencing layers that aren't // included in the manifest. if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { return "", errRootFSMismatch } for i := range downloadedRootFS.DiffIDs { if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { return "", errRootFSMismatch } } } imageID, err := p.config.ImageStore.Put(ctx, configJSON) if err != nil { return "", err } return imageID, nil } func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err } id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) return id, manifestDigest, err } func (p *v2Puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err } id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) return id, manifestDigest, err } func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) { select { case configJSON := <-configChan: rootfs, err := s.RootFSFromConfig(configJSON) if err != nil { return nil, nil, nil, err } platform, err := s.PlatformFromConfig(configJSON) if err != nil { return nil, nil, nil, err } return configJSON, rootfs, platform, nil case err := <-errChan: return nil, nil, nil, err // Don't need a case for ctx.Done in the select because cancellation // will trigger an error in p.pullSchema2ImageConfig. } } // pullManifestList handles "manifest lists" which point to various // platform-specific manifests. 
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *specs.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) { manifestListDigest, err = schema2ManifestDigest(ref, mfstList) if err != nil { return "", "", err } var platform specs.Platform if pp != nil { platform = *pp } logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), platforms.Format(platform), runtime.GOARCH) manifestMatches := filterManifests(mfstList.Manifests, platform) if len(manifestMatches) == 0 { errMsg := fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(platform)) logrus.Debugf(errMsg) return "", "", errors.New(errMsg) } if len(manifestMatches) > 1 { logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String()) } match := manifestMatches[0] if err := checkImageCompatibility(match.Platform.OS, match.Platform.OSVersion); err != nil { return "", "", err } desc := specs.Descriptor{ Digest: match.Digest, Size: match.Size, MediaType: match.MediaType, } manifest, err := p.manifestStore.Get(ctx, desc) if err != nil { return "", "", err } manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), match.Digest) if err != nil { return "", "", err } switch v := manifest.(type) { case *schema1.SignedManifest: msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) logrus.Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } case *schema2.DeserializedManifest: platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullSchema2(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } case *ocischema.DeserializedManifest: platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullOCI(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } default: return "", "", errors.New("unsupported manifest format") } return id, manifestListDigest, err } func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { blobs := p.repo.Blobs(ctx) configJSON, err = blobs.Get(ctx, dgst) if err != nil { return nil, err } // Verify image config digest verifier := dgst.Verifier() if _, err := verifier.Write(configJSON); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image config verification failed for digest %s", dgst) logrus.Error(err) return nil, err } return configJSON, nil } // schema2ManifestDigest computes the manifest digest, and, if pulling by // digest, ensures that it matches the requested digest. func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { _, canonical, err := mfst.Payload() if err != nil { return "", err } // If pull by digest, then verify the manifest digest. 
if digested, isDigested := ref.(reference.Canonical); isDigested { verifier := digested.Digest().Verifier() if _, err := verifier.Write(canonical); err != nil { return "", err } if !verifier.Verified() { err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) logrus.Error(err) return "", err } return digested.Digest(), nil } return digest.FromBytes(canonical), nil } func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) { // If pull by digest, then verify the manifest digest. NOTE: It is // important to do this first, before any other content validation. If the // digest cannot be verified, don't even bother with those other things. if digested, isCanonical := ref.(reference.Canonical); isCanonical { verifier := digested.Digest().Verifier() if _, err := verifier.Write(signedManifest.Canonical); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) logrus.Error(err) return nil, err } } m = &signedManifest.Manifest if m.SchemaVersion != 1 { return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) } if len(m.FSLayers) != len(m.History) { return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) } if len(m.FSLayers) == 0 { return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) } return m, nil } // fixManifestLayers removes repeated layers from the manifest and checks the // correctness of the parent chain. func fixManifestLayers(m *schema1.Manifest) error { imgs := make([]*image.V1Image, len(m.FSLayers)) for i := range m.FSLayers { img := &image.V1Image{} if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { return err } imgs[i] = img if err := v1.ValidateID(img.ID); err != nil { return err } } if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { // Windows base layer can point to a base layer parent that is not in manifest. return errors.New("invalid parent ID in the base layer of the image") } // check general duplicates to error instead of a deadlock idmap := make(map[string]struct{}) var lastID string for _, img := range imgs { // skip IDs that appear after each other, we handle those later if _, exists := idmap[img.ID]; img.ID != lastID && exists { return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) } lastID = img.ID idmap[lastID] = struct{}{} } // backwards loop so that we keep the remaining indexes after removing items for i := len(imgs) - 2; i >= 0; i-- { if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) m.History = append(m.History[:i], m.History[i+1:]...) } else if imgs[i].Parent != imgs[i+1].ID { return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) } } return nil } func createDownloadFile() (*os.File, error) { return os.CreateTemp("", "GetImageBlob") } func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform { return specs.Platform{ OS: p.OS, Architecture: p.Architecture, Variant: p.Variant, OSFeatures: p.OSFeatures, OSVersion: p.OSVersion, } }
package distribution // import "github.com/docker/docker/distribution" import ( "context" "encoding/json" "fmt" "io" "os" "runtime" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/distribution" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/ocischema" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" v1 "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) var ( errRootFSMismatch = errors.New("layers from manifest don't match image configuration") errRootFSInvalid = errors.New("invalid rootfs in image configuration") ) // ImageConfigPullError is an error pulling the image config blob // (only applies to schema2). type ImageConfigPullError struct { Err error } // Error returns the error string for ImageConfigPullError. func (e ImageConfigPullError) Error() string { return "error pulling image configuration: " + e.Err.Error() } type v2Puller struct { V2MetadataService metadata.V2MetadataService endpoint registry.APIEndpoint config *ImagePullConfig repoInfo *registry.RepositoryInfo repo distribution.Repository manifestStore *manifestStore } func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { // TODO(tiborvass): was ReceiveTimeout p.repo, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") if err != nil { logrus.Warnf("Error getting v2 registry: %v", err) return err } p.manifestStore.remote, err = p.repo.Manifests(ctx) if err != nil { return err } if err = p.pullV2Repository(ctx, ref); err != nil { if _, ok := err.(fallbackError); ok { return err } if continueOnError(err, p.endpoint.Mirror) { return fallbackError{ err: err, transportOK: true, } } } return err } func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { var layersDownloaded bool if !reference.IsNameOnly(ref) { layersDownloaded, err = p.pullV2Tag(ctx, ref, p.config.Platform) if err != nil { return err } } else { tags, err := p.repo.Tags(ctx).All(ctx) if err != nil { return err } for _, tag := range tags { tagRef, err := reference.WithTag(ref, tag) if err != nil { return err } pulledNew, err := p.pullV2Tag(ctx, tagRef, p.config.Platform) if err != nil { // Since this is the pull-all-tags case, don't // allow an error pulling a particular tag to // make the whole pull fall back to v1. if fallbackErr, ok := err.(fallbackError); ok { return fallbackErr.err } return err } // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
layersDownloaded = layersDownloaded || pulledNew } } writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) return nil } type v2LayerDescriptor struct { digest digest.Digest diffID layer.DiffID repoInfo *registry.RepositoryInfo repo distribution.Repository V2MetadataService metadata.V2MetadataService tmpFile *os.File verifier digest.Verifier src distribution.Descriptor } func (ld *v2LayerDescriptor) Key() string { return "v2:" + ld.digest.String() } func (ld *v2LayerDescriptor) ID() string { return stringid.TruncateID(ld.digest.String()) } func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { if ld.diffID != "" { return ld.diffID, nil } return ld.V2MetadataService.GetDiffID(ld.digest) } func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { logrus.Debugf("pulling blob %q", ld.digest) var ( err error offset int64 ) if ld.tmpFile == nil { ld.tmpFile, err = createDownloadFile() if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } else { offset, err = ld.tmpFile.Seek(0, io.SeekEnd) if err != nil { logrus.Debugf("error seeking to end of download file: %v", err) offset = 0 ld.tmpFile.Close() if err := os.Remove(ld.tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } ld.tmpFile, err = createDownloadFile() if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } else if offset != 0 { logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) } } tmpFile := ld.tmpFile layerDownload, err := ld.open(ctx) if err != nil { logrus.Errorf("Error initiating layer download: %v", err) return nil, 0, retryOnError(err) } if offset != 0 { _, err := layerDownload.Seek(offset, io.SeekStart) if err != nil { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } } size, err := layerDownload.Seek(0, io.SeekEnd) if err != nil { // Seek failed, perhaps because there was no Content-Length // header. This shouldn't fail the download, because we can // still continue without a progress bar. size = 0 } else { if size != 0 && offset > size { logrus.Debug("Partial download is larger than full blob. Starting over") offset = 0 if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } // Restore the seek offset either at the beginning of the // stream, or just after the last byte we have from previous // attempts. _, err = layerDownload.Seek(offset, io.SeekStart) if err != nil { return nil, 0, err } } reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") defer reader.Close() if ld.verifier == nil { ld.verifier = ld.digest.Verifier() } _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) if err != nil { if err == transport.ErrWrongCodeForByteRange { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } return nil, 0, retryOnError(err) } progress.Update(progressOutput, ld.ID(), "Verifying Checksum") if !ld.verifier.Verified() { err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) logrus.Error(err) // Allow a retry if this digest verification error happened // after a resumed download. 
if offset != 0 { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } return nil, 0, xfer.DoNotRetry{Err: err} } progress.Update(progressOutput, ld.ID(), "Download complete") logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) _, err = tmpFile.Seek(0, io.SeekStart) if err != nil { tmpFile.Close() if err := os.Remove(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } ld.tmpFile = nil ld.verifier = nil return nil, 0, xfer.DoNotRetry{Err: err} } // hand off the temporary file to the download manager, so it will only // be closed once ld.tmpFile = nil return ioutils.NewReadCloserWrapper(tmpFile, func() error { tmpFile.Close() err := os.RemoveAll(tmpFile.Name()) if err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } return err }), size, nil } func (ld *v2LayerDescriptor) Close() { if ld.tmpFile != nil { ld.tmpFile.Close() if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } } } func (ld *v2LayerDescriptor) truncateDownloadFile() error { // Need a new hash context since we will be redoing the download ld.verifier = nil if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil { logrus.Errorf("error seeking to beginning of download file: %v", err) return err } if err := ld.tmpFile.Truncate(0); err != nil { logrus.Errorf("error truncating download file: %v", err) return err } return nil } func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { // Cache mapping from this layer's DiffID to the blobsum ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) } func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) { var ( tagOrDigest string // Used for logging/progress only dgst digest.Digest mt string size int64 tagged reference.NamedTagged isTagged bool ) if digested, isDigested := ref.(reference.Canonical); isDigested { dgst = digested.Digest() tagOrDigest = digested.String() } else if tagged, isTagged = ref.(reference.NamedTagged); isTagged { tagService := p.repo.Tags(ctx) desc, err := tagService.Get(ctx, tagged.Tag()) if err != nil { return false, err } dgst = desc.Digest tagOrDigest = tagged.Tag() mt = desc.MediaType size = desc.Size } else { return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) } ctx = log.WithLogger(ctx, logrus.WithFields( logrus.Fields{ "digest": dgst, "remote": ref, })) desc := specs.Descriptor{ MediaType: mt, Digest: dgst, Size: size, } manifest, err := p.manifestStore.Get(ctx, desc) if err != nil { if isTagged && isNotFound(errors.Cause(err)) { logrus.WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag") msg := `%s Failed to pull manifest by the resolved digest. This registry does not appear to conform to the distribution registry specification; falling back to pull by tag. This fallback is DEPRECATED, and will be removed in a future release. Please contact admins of %s. %s ` warnEmoji := "\U000026A0\U0000FE0F" progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji) // Fetch by tag worked, but fetch by digest didn't. // This is a broken registry implementation. // We'll fallback to the old behavior and get the manifest by tag. 
var ms distribution.ManifestService ms, err = p.repo.Manifests(ctx) if err != nil { return false, err } manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag())) err = errors.Wrap(err, "error after falling back to get manifest by tag") } if err != nil { return false, err } } if manifest == nil { return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) } if m, ok := manifest.(*schema2.DeserializedManifest); ok { var allowedMediatype bool for _, t := range p.config.Schema2Types { if m.Manifest.Config.MediaType == t { allowedMediatype = true break } } if !allowedMediatype { configClass := mediaTypeClasses[m.Manifest.Config.MediaType] if configClass == "" { configClass = "unknown" } return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass} } } logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) var ( id digest.Digest manifestDigest digest.Digest ) switch v := manifest.(type) { case *schema1.SignedManifest: if p.config.RequireSchema2 { return false, fmt.Errorf("invalid manifest: not schema2") } // give registries time to upgrade to schema2 and only warn if we know a registry has been upgraded long time ago // TODO: condition to be removed if reference.Domain(ref) == "docker.io" { msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) logrus.Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) } id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform) if err != nil { return false, err } case *schema2.DeserializedManifest: id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform) if err != nil { return false, err } case *ocischema.DeserializedManifest: id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform) if err != nil { return false, err } case *manifestlist.DeserializedManifestList: id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform) if err != nil { return false, err } default: return false, invalidManifestFormatError{} } progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) if p.config.ReferenceStore != nil { oldTagID, err := p.config.ReferenceStore.Get(ref) if err == nil { if oldTagID == id { return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) } } else if err != refstore.ErrDoesNotExist { return false, err } if canonical, ok := ref.(reference.Canonical); ok { if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { return false, err } } else { if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { return false, err } if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { return false, err } } } return true, nil } func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { if platform != nil { // Early bath if the requested OS doesn't match that of the configuration. // This avoids doing the download, only to potentially fail later. 
if !system.IsOSSupported(platform.OS) { return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", runtime.GOOS, platform.OS) } } var verifiedManifest *schema1.Manifest verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) if err != nil { return "", "", err } rootFS := image.NewRootFS() // remove duplicate layers and check parent chain validity err = fixManifestLayers(verifiedManifest) if err != nil { return "", "", err } var descriptors []xfer.DownloadDescriptor // Image history converted to the new format var history []image.History // Note that the order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { blobSum := verifiedManifest.FSLayers[i].BlobSum if err = blobSum.Validate(); err != nil { return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum) } var throwAway struct { ThrowAway bool `json:"throwaway,omitempty"` } if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { return "", "", err } h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) if err != nil { return "", "", err } history = append(history, h) if throwAway.ThrowAway { continue } layerDescriptor := &v2LayerDescriptor{ digest: blobSum, repoInfo: p.repoInfo, repo: p.repo, V2MetadataService: p.V2MetadataService, } descriptors = append(descriptors, layerDescriptor) } resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, runtime.GOOS, descriptors, p.config.ProgressOutput) if err != nil { return "", "", err } defer release() config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) if err != nil { return "", "", err } imageID, err := p.config.ImageStore.Put(ctx, config) if err != nil { return "", "", err } manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) return imageID, manifestDigest, nil } func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) { if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil { // If the image already exists locally, no need to pull // anything. return target.Digest, nil } var descriptors []xfer.DownloadDescriptor // Note that the order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. 
for _, d := range layers { if err := d.Digest.Validate(); err != nil { return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest) } layerDescriptor := &v2LayerDescriptor{ digest: d.Digest, repo: p.repo, repoInfo: p.repoInfo, V2MetadataService: p.V2MetadataService, src: d, } descriptors = append(descriptors, layerDescriptor) } configChan := make(chan []byte, 1) configErrChan := make(chan error, 1) layerErrChan := make(chan error, 1) downloadsDone := make(chan struct{}) var cancel func() ctx, cancel = context.WithCancel(ctx) defer cancel() // Pull the image config go func() { configJSON, err := p.pullSchema2Config(ctx, target.Digest) if err != nil { configErrChan <- ImageConfigPullError{Err: err} cancel() return } configChan <- configJSON }() var ( configJSON []byte // raw serialized image config downloadedRootFS *image.RootFS // rootFS from registered layers configRootFS *image.RootFS // rootFS from configuration release func() // release resources from rootFS download configPlatform *specs.Platform // for LCOW when registering downloaded layers ) layerStoreOS := runtime.GOOS if platform != nil { layerStoreOS = platform.OS } // https://github.com/docker/docker/issues/24766 - Err on the side of caution, // explicitly blocking images intended for linux from the Windows daemon. On // Windows, we do this before the attempt to download, effectively serialising // the download slightly slowing it down. We have to do it this way, as // chances are the download of layers itself would fail due to file names // which aren't suitable for NTFS. At some point in the future, if a similar // check to block Windows images being pulled on Linux is implemented, it // may be necessary to perform the same type of serialisation. if runtime.GOOS == "windows" { configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err != nil { return "", err } if configRootFS == nil { return "", errRootFSInvalid } if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil { return "", err } if len(descriptors) != len(configRootFS.DiffIDs) { return "", errRootFSMismatch } if platform == nil { // Early bath if the requested OS doesn't match that of the configuration. // This avoids doing the download, only to potentially fail later. 
if !system.IsOSSupported(configPlatform.OS) { return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS) } layerStoreOS = configPlatform.OS } // Populate diff ids in descriptors to avoid downloading foreign layers // which have been side loaded for i := range descriptors { descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] } } if p.config.DownloadManager != nil { go func() { var ( err error rootFS image.RootFS ) downloadRootFS := *image.NewRootFS() rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput) if err != nil { // Intentionally do not cancel the config download here // as the error from config download (if there is one) // is more interesting than the layer download error layerErrChan <- err return } downloadedRootFS = &rootFS close(downloadsDone) }() } else { // We have nothing to download close(downloadsDone) } if configJSON == nil { configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err == nil && configRootFS == nil { err = errRootFSInvalid } if err != nil { cancel() select { case <-downloadsDone: case <-layerErrChan: } return "", err } } select { case <-downloadsDone: case err = <-layerErrChan: return "", err } if release != nil { defer release() } if downloadedRootFS != nil { // The DiffIDs returned in rootFS MUST match those in the config. // Otherwise the image config could be referencing layers that aren't // included in the manifest. if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { return "", errRootFSMismatch } for i := range downloadedRootFS.DiffIDs { if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { return "", errRootFSMismatch } } } imageID, err := p.config.ImageStore.Put(ctx, configJSON) if err != nil { return "", err } return imageID, nil } func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err } id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) return id, manifestDigest, err } func (p *v2Puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err } id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) return id, manifestDigest, err } func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) { select { case configJSON := <-configChan: rootfs, err := s.RootFSFromConfig(configJSON) if err != nil { return nil, nil, nil, err } platform, err := s.PlatformFromConfig(configJSON) if err != nil { return nil, nil, nil, err } return configJSON, rootfs, platform, nil case err := <-errChan: return nil, nil, nil, err // Don't need a case for ctx.Done in the select because cancellation // will trigger an error in p.pullSchema2ImageConfig. } } // pullManifestList handles "manifest lists" which point to various // platform-specific manifests. 
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *specs.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) { manifestListDigest, err = schema2ManifestDigest(ref, mfstList) if err != nil { return "", "", err } var platform specs.Platform if pp != nil { platform = *pp } logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), platforms.Format(platform), runtime.GOARCH) manifestMatches := filterManifests(mfstList.Manifests, platform) if len(manifestMatches) == 0 { errMsg := fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(platform)) logrus.Debugf(errMsg) return "", "", errors.New(errMsg) } if len(manifestMatches) > 1 { logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String()) } match := manifestMatches[0] if err := checkImageCompatibility(match.Platform.OS, match.Platform.OSVersion); err != nil { return "", "", err } desc := specs.Descriptor{ Digest: match.Digest, Size: match.Size, MediaType: match.MediaType, } manifest, err := p.manifestStore.Get(ctx, desc) if err != nil { return "", "", err } manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), match.Digest) if err != nil { return "", "", err } switch v := manifest.(type) { case *schema1.SignedManifest: msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) logrus.Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } case *schema2.DeserializedManifest: platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullSchema2(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } case *ocischema.DeserializedManifest: platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullOCI(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } default: return "", "", errors.New("unsupported manifest format") } return id, manifestListDigest, err } func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { blobs := p.repo.Blobs(ctx) configJSON, err = blobs.Get(ctx, dgst) if err != nil { return nil, err } // Verify image config digest verifier := dgst.Verifier() if _, err := verifier.Write(configJSON); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image config verification failed for digest %s", dgst) logrus.Error(err) return nil, err } return configJSON, nil } // schema2ManifestDigest computes the manifest digest, and, if pulling by // digest, ensures that it matches the requested digest. func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { _, canonical, err := mfst.Payload() if err != nil { return "", err } // If pull by digest, then verify the manifest digest. 
if digested, isDigested := ref.(reference.Canonical); isDigested { verifier := digested.Digest().Verifier() if _, err := verifier.Write(canonical); err != nil { return "", err } if !verifier.Verified() { err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) logrus.Error(err) return "", err } return digested.Digest(), nil } return digest.FromBytes(canonical), nil } func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) { // If pull by digest, then verify the manifest digest. NOTE: It is // important to do this first, before any other content validation. If the // digest cannot be verified, don't even bother with those other things. if digested, isCanonical := ref.(reference.Canonical); isCanonical { verifier := digested.Digest().Verifier() if _, err := verifier.Write(signedManifest.Canonical); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) logrus.Error(err) return nil, err } } m = &signedManifest.Manifest if m.SchemaVersion != 1 { return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) } if len(m.FSLayers) != len(m.History) { return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) } if len(m.FSLayers) == 0 { return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) } return m, nil } // fixManifestLayers removes repeated layers from the manifest and checks the // correctness of the parent chain. func fixManifestLayers(m *schema1.Manifest) error { imgs := make([]*image.V1Image, len(m.FSLayers)) for i := range m.FSLayers { img := &image.V1Image{} if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { return err } imgs[i] = img if err := v1.ValidateID(img.ID); err != nil { return err } } if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { // Windows base layer can point to a base layer parent that is not in manifest. return errors.New("invalid parent ID in the base layer of the image") } // check general duplicates to error instead of a deadlock idmap := make(map[string]struct{}) var lastID string for _, img := range imgs { // skip IDs that appear after each other, we handle those later if _, exists := idmap[img.ID]; img.ID != lastID && exists { return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) } lastID = img.ID idmap[lastID] = struct{}{} } // backwards loop so that we keep the remaining indexes after removing items for i := len(imgs) - 2; i >= 0; i-- { if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) m.History = append(m.History[:i], m.History[i+1:]...) } else if imgs[i].Parent != imgs[i+1].ID { return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) } } return nil } func createDownloadFile() (*os.File, error) { return os.CreateTemp("", "GetImageBlob") } func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform { return specs.Platform{ OS: p.OS, Architecture: p.Architecture, Variant: p.Variant, OSFeatures: p.OSFeatures, OSVersion: p.OSVersion, } }
dkkb
fecf45b09add4f6289ba1fcec67d6993b04db7af
8684f482e4cb84fee269d8e91176ca8e1afcc218
Looks like this may not be needed as well (from a quick look), as this config is attached to `p`, which is also what the `pullV2Repository()` function is attached to 🤔
thaJeztah
4,485
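The review comment above argues that the extra argument is redundant because the pull configuration is already reachable through the receiver `p`. Below is a minimal, self-contained Go sketch of that pattern, not the actual moby code: the `pullConfig` and `puller` types and the `Platform` field are illustrative stand-ins. It contrasts a method that has the value threaded through as a parameter with one that simply reads it from the receiver's config, which is the shape of the change this PR makes.

```go
package main

import "fmt"

// pullConfig is a stand-in for the options a puller is constructed with.
type pullConfig struct {
	Platform string // e.g. "linux/amd64"
}

// puller is a stand-in for a type like v2Puller: it already holds its config.
type puller struct {
	config *pullConfig
}

// pullWithArg passes the platform explicitly, even though the receiver
// already carries the same value inside p.config.
func (p *puller) pullWithArg(ref string, platform string) {
	fmt.Printf("pulling %s for %s\n", ref, platform)
}

// pull reads the platform from the receiver's config instead, so callers
// no longer have to thread the value through every call.
func (p *puller) pull(ref string) {
	fmt.Printf("pulling %s for %s\n", ref, p.config.Platform)
}

func main() {
	p := &puller{config: &pullConfig{Platform: "linux/amd64"}}
	p.pullWithArg("busybox:latest", p.config.Platform) // redundant argument
	p.pull("busybox:latest")                           // same information, simpler signature
}
```

The trade-off is the usual one: reading from the receiver keeps signatures small when the value is per-puller state, while an explicit parameter would only be needed if different calls on the same puller could legitimately use different platforms.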
moby/moby
42,782
Remove platform argument from Puller interface.
The `platform` argument is unneeded because `ImagePullConfig` is already in the `v2Puller` struct. **- What I did** Remove platform argument from Puller interface. **- How I did it** The platform option was introduced in this [contribution](https://github.com/moby/moby/commit/0380fbff37922cadf294851b1546f4c212c7f364#diff-cd6eca1d1dbceb8e107577c74bf1760760204df66f0ff833c8ca35291e40cad0R24), but I don't think it's necessary, we can just use ImagePullConfig directly.
null
2021-08-25 04:06:26+00:00
2021-08-30 11:19:08+00:00
distribution/pull_v2.go
package distribution // import "github.com/docker/docker/distribution" import ( "context" "encoding/json" "fmt" "io" "os" "runtime" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/distribution" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/ocischema" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" v1 "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) var ( errRootFSMismatch = errors.New("layers from manifest don't match image configuration") errRootFSInvalid = errors.New("invalid rootfs in image configuration") ) // ImageConfigPullError is an error pulling the image config blob // (only applies to schema2). type ImageConfigPullError struct { Err error } // Error returns the error string for ImageConfigPullError. func (e ImageConfigPullError) Error() string { return "error pulling image configuration: " + e.Err.Error() } type v2Puller struct { V2MetadataService metadata.V2MetadataService endpoint registry.APIEndpoint config *ImagePullConfig repoInfo *registry.RepositoryInfo repo distribution.Repository manifestStore *manifestStore } func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) { // TODO(tiborvass): was ReceiveTimeout p.repo, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") if err != nil { logrus.Warnf("Error getting v2 registry: %v", err) return err } p.manifestStore.remote, err = p.repo.Manifests(ctx) if err != nil { return err } if err = p.pullV2Repository(ctx, ref, platform); err != nil { if _, ok := err.(fallbackError); ok { return err } if continueOnError(err, p.endpoint.Mirror) { return fallbackError{ err: err, transportOK: true, } } } return err } func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) { var layersDownloaded bool if !reference.IsNameOnly(ref) { layersDownloaded, err = p.pullV2Tag(ctx, ref, platform) if err != nil { return err } } else { tags, err := p.repo.Tags(ctx).All(ctx) if err != nil { return err } for _, tag := range tags { tagRef, err := reference.WithTag(ref, tag) if err != nil { return err } pulledNew, err := p.pullV2Tag(ctx, tagRef, platform) if err != nil { // Since this is the pull-all-tags case, don't // allow an error pulling a particular tag to // make the whole pull fall back to v1. if fallbackErr, ok := err.(fallbackError); ok { return fallbackErr.err } return err } // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
layersDownloaded = layersDownloaded || pulledNew } } writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) return nil } type v2LayerDescriptor struct { digest digest.Digest diffID layer.DiffID repoInfo *registry.RepositoryInfo repo distribution.Repository V2MetadataService metadata.V2MetadataService tmpFile *os.File verifier digest.Verifier src distribution.Descriptor } func (ld *v2LayerDescriptor) Key() string { return "v2:" + ld.digest.String() } func (ld *v2LayerDescriptor) ID() string { return stringid.TruncateID(ld.digest.String()) } func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { if ld.diffID != "" { return ld.diffID, nil } return ld.V2MetadataService.GetDiffID(ld.digest) } func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { logrus.Debugf("pulling blob %q", ld.digest) var ( err error offset int64 ) if ld.tmpFile == nil { ld.tmpFile, err = createDownloadFile() if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } else { offset, err = ld.tmpFile.Seek(0, io.SeekEnd) if err != nil { logrus.Debugf("error seeking to end of download file: %v", err) offset = 0 ld.tmpFile.Close() if err := os.Remove(ld.tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } ld.tmpFile, err = createDownloadFile() if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } else if offset != 0 { logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) } } tmpFile := ld.tmpFile layerDownload, err := ld.open(ctx) if err != nil { logrus.Errorf("Error initiating layer download: %v", err) return nil, 0, retryOnError(err) } if offset != 0 { _, err := layerDownload.Seek(offset, io.SeekStart) if err != nil { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } } size, err := layerDownload.Seek(0, io.SeekEnd) if err != nil { // Seek failed, perhaps because there was no Content-Length // header. This shouldn't fail the download, because we can // still continue without a progress bar. size = 0 } else { if size != 0 && offset > size { logrus.Debug("Partial download is larger than full blob. Starting over") offset = 0 if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } // Restore the seek offset either at the beginning of the // stream, or just after the last byte we have from previous // attempts. _, err = layerDownload.Seek(offset, io.SeekStart) if err != nil { return nil, 0, err } } reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") defer reader.Close() if ld.verifier == nil { ld.verifier = ld.digest.Verifier() } _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) if err != nil { if err == transport.ErrWrongCodeForByteRange { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } return nil, 0, retryOnError(err) } progress.Update(progressOutput, ld.ID(), "Verifying Checksum") if !ld.verifier.Verified() { err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) logrus.Error(err) // Allow a retry if this digest verification error happened // after a resumed download. 
if offset != 0 { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } return nil, 0, xfer.DoNotRetry{Err: err} } progress.Update(progressOutput, ld.ID(), "Download complete") logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) _, err = tmpFile.Seek(0, io.SeekStart) if err != nil { tmpFile.Close() if err := os.Remove(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } ld.tmpFile = nil ld.verifier = nil return nil, 0, xfer.DoNotRetry{Err: err} } // hand off the temporary file to the download manager, so it will only // be closed once ld.tmpFile = nil return ioutils.NewReadCloserWrapper(tmpFile, func() error { tmpFile.Close() err := os.RemoveAll(tmpFile.Name()) if err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } return err }), size, nil } func (ld *v2LayerDescriptor) Close() { if ld.tmpFile != nil { ld.tmpFile.Close() if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } } } func (ld *v2LayerDescriptor) truncateDownloadFile() error { // Need a new hash context since we will be redoing the download ld.verifier = nil if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil { logrus.Errorf("error seeking to beginning of download file: %v", err) return err } if err := ld.tmpFile.Truncate(0); err != nil { logrus.Errorf("error truncating download file: %v", err) return err } return nil } func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { // Cache mapping from this layer's DiffID to the blobsum ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) } func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) { var ( tagOrDigest string // Used for logging/progress only dgst digest.Digest mt string size int64 tagged reference.NamedTagged isTagged bool ) if digested, isDigested := ref.(reference.Canonical); isDigested { dgst = digested.Digest() tagOrDigest = digested.String() } else if tagged, isTagged = ref.(reference.NamedTagged); isTagged { tagService := p.repo.Tags(ctx) desc, err := tagService.Get(ctx, tagged.Tag()) if err != nil { return false, err } dgst = desc.Digest tagOrDigest = tagged.Tag() mt = desc.MediaType size = desc.Size } else { return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) } ctx = log.WithLogger(ctx, logrus.WithFields( logrus.Fields{ "digest": dgst, "remote": ref, })) desc := specs.Descriptor{ MediaType: mt, Digest: dgst, Size: size, } manifest, err := p.manifestStore.Get(ctx, desc) if err != nil { if isTagged && isNotFound(errors.Cause(err)) { logrus.WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag") msg := `%s Failed to pull manifest by the resolved digest. This registry does not appear to conform to the distribution registry specification; falling back to pull by tag. This fallback is DEPRECATED, and will be removed in a future release. Please contact admins of %s. %s ` warnEmoji := "\U000026A0\U0000FE0F" progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji) // Fetch by tag worked, but fetch by digest didn't. // This is a broken registry implementation. // We'll fallback to the old behavior and get the manifest by tag. 
var ms distribution.ManifestService ms, err = p.repo.Manifests(ctx) if err != nil { return false, err } manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag())) err = errors.Wrap(err, "error after falling back to get manifest by tag") } if err != nil { return false, err } } if manifest == nil { return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) } if m, ok := manifest.(*schema2.DeserializedManifest); ok { var allowedMediatype bool for _, t := range p.config.Schema2Types { if m.Manifest.Config.MediaType == t { allowedMediatype = true break } } if !allowedMediatype { configClass := mediaTypeClasses[m.Manifest.Config.MediaType] if configClass == "" { configClass = "unknown" } return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass} } } logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) var ( id digest.Digest manifestDigest digest.Digest ) switch v := manifest.(type) { case *schema1.SignedManifest: if p.config.RequireSchema2 { return false, fmt.Errorf("invalid manifest: not schema2") } // give registries time to upgrade to schema2 and only warn if we know a registry has been upgraded long time ago // TODO: condition to be removed if reference.Domain(ref) == "docker.io" { msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) logrus.Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) } id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform) if err != nil { return false, err } case *schema2.DeserializedManifest: id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform) if err != nil { return false, err } case *ocischema.DeserializedManifest: id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform) if err != nil { return false, err } case *manifestlist.DeserializedManifestList: id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform) if err != nil { return false, err } default: return false, invalidManifestFormatError{} } progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) if p.config.ReferenceStore != nil { oldTagID, err := p.config.ReferenceStore.Get(ref) if err == nil { if oldTagID == id { return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) } } else if err != refstore.ErrDoesNotExist { return false, err } if canonical, ok := ref.(reference.Canonical); ok { if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { return false, err } } else { if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { return false, err } if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { return false, err } } } return true, nil } func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { if platform != nil { // Early bath if the requested OS doesn't match that of the configuration. // This avoids doing the download, only to potentially fail later. 
if !system.IsOSSupported(platform.OS) { return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", runtime.GOOS, platform.OS) } } var verifiedManifest *schema1.Manifest verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) if err != nil { return "", "", err } rootFS := image.NewRootFS() // remove duplicate layers and check parent chain validity err = fixManifestLayers(verifiedManifest) if err != nil { return "", "", err } var descriptors []xfer.DownloadDescriptor // Image history converted to the new format var history []image.History // Note that the order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { blobSum := verifiedManifest.FSLayers[i].BlobSum if err = blobSum.Validate(); err != nil { return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum) } var throwAway struct { ThrowAway bool `json:"throwaway,omitempty"` } if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { return "", "", err } h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) if err != nil { return "", "", err } history = append(history, h) if throwAway.ThrowAway { continue } layerDescriptor := &v2LayerDescriptor{ digest: blobSum, repoInfo: p.repoInfo, repo: p.repo, V2MetadataService: p.V2MetadataService, } descriptors = append(descriptors, layerDescriptor) } resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, runtime.GOOS, descriptors, p.config.ProgressOutput) if err != nil { return "", "", err } defer release() config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) if err != nil { return "", "", err } imageID, err := p.config.ImageStore.Put(ctx, config) if err != nil { return "", "", err } manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) return imageID, manifestDigest, nil } func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) { if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil { // If the image already exists locally, no need to pull // anything. return target.Digest, nil } var descriptors []xfer.DownloadDescriptor // Note that the order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. 
for _, d := range layers { if err := d.Digest.Validate(); err != nil { return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest) } layerDescriptor := &v2LayerDescriptor{ digest: d.Digest, repo: p.repo, repoInfo: p.repoInfo, V2MetadataService: p.V2MetadataService, src: d, } descriptors = append(descriptors, layerDescriptor) } configChan := make(chan []byte, 1) configErrChan := make(chan error, 1) layerErrChan := make(chan error, 1) downloadsDone := make(chan struct{}) var cancel func() ctx, cancel = context.WithCancel(ctx) defer cancel() // Pull the image config go func() { configJSON, err := p.pullSchema2Config(ctx, target.Digest) if err != nil { configErrChan <- ImageConfigPullError{Err: err} cancel() return } configChan <- configJSON }() var ( configJSON []byte // raw serialized image config downloadedRootFS *image.RootFS // rootFS from registered layers configRootFS *image.RootFS // rootFS from configuration release func() // release resources from rootFS download configPlatform *specs.Platform // for LCOW when registering downloaded layers ) layerStoreOS := runtime.GOOS if platform != nil { layerStoreOS = platform.OS } // https://github.com/docker/docker/issues/24766 - Err on the side of caution, // explicitly blocking images intended for linux from the Windows daemon. On // Windows, we do this before the attempt to download, effectively serialising // the download slightly slowing it down. We have to do it this way, as // chances are the download of layers itself would fail due to file names // which aren't suitable for NTFS. At some point in the future, if a similar // check to block Windows images being pulled on Linux is implemented, it // may be necessary to perform the same type of serialisation. if runtime.GOOS == "windows" { configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err != nil { return "", err } if configRootFS == nil { return "", errRootFSInvalid } if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil { return "", err } if len(descriptors) != len(configRootFS.DiffIDs) { return "", errRootFSMismatch } if platform == nil { // Early bath if the requested OS doesn't match that of the configuration. // This avoids doing the download, only to potentially fail later. 
if !system.IsOSSupported(configPlatform.OS) { return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS) } layerStoreOS = configPlatform.OS } // Populate diff ids in descriptors to avoid downloading foreign layers // which have been side loaded for i := range descriptors { descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] } } if p.config.DownloadManager != nil { go func() { var ( err error rootFS image.RootFS ) downloadRootFS := *image.NewRootFS() rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput) if err != nil { // Intentionally do not cancel the config download here // as the error from config download (if there is one) // is more interesting than the layer download error layerErrChan <- err return } downloadedRootFS = &rootFS close(downloadsDone) }() } else { // We have nothing to download close(downloadsDone) } if configJSON == nil { configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err == nil && configRootFS == nil { err = errRootFSInvalid } if err != nil { cancel() select { case <-downloadsDone: case <-layerErrChan: } return "", err } } select { case <-downloadsDone: case err = <-layerErrChan: return "", err } if release != nil { defer release() } if downloadedRootFS != nil { // The DiffIDs returned in rootFS MUST match those in the config. // Otherwise the image config could be referencing layers that aren't // included in the manifest. if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { return "", errRootFSMismatch } for i := range downloadedRootFS.DiffIDs { if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { return "", errRootFSMismatch } } } imageID, err := p.config.ImageStore.Put(ctx, configJSON) if err != nil { return "", err } return imageID, nil } func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err } id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) return id, manifestDigest, err } func (p *v2Puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err } id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) return id, manifestDigest, err } func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) { select { case configJSON := <-configChan: rootfs, err := s.RootFSFromConfig(configJSON) if err != nil { return nil, nil, nil, err } platform, err := s.PlatformFromConfig(configJSON) if err != nil { return nil, nil, nil, err } return configJSON, rootfs, platform, nil case err := <-errChan: return nil, nil, nil, err // Don't need a case for ctx.Done in the select because cancellation // will trigger an error in p.pullSchema2ImageConfig. } } // pullManifestList handles "manifest lists" which point to various // platform-specific manifests. 
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *specs.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) { manifestListDigest, err = schema2ManifestDigest(ref, mfstList) if err != nil { return "", "", err } var platform specs.Platform if pp != nil { platform = *pp } logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), platforms.Format(platform), runtime.GOARCH) manifestMatches := filterManifests(mfstList.Manifests, platform) if len(manifestMatches) == 0 { errMsg := fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(platform)) logrus.Debugf(errMsg) return "", "", errors.New(errMsg) } if len(manifestMatches) > 1 { logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String()) } match := manifestMatches[0] if err := checkImageCompatibility(match.Platform.OS, match.Platform.OSVersion); err != nil { return "", "", err } desc := specs.Descriptor{ Digest: match.Digest, Size: match.Size, MediaType: match.MediaType, } manifest, err := p.manifestStore.Get(ctx, desc) if err != nil { return "", "", err } manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), match.Digest) if err != nil { return "", "", err } switch v := manifest.(type) { case *schema1.SignedManifest: msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) logrus.Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } case *schema2.DeserializedManifest: platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullSchema2(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } case *ocischema.DeserializedManifest: platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullOCI(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } default: return "", "", errors.New("unsupported manifest format") } return id, manifestListDigest, err } func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { blobs := p.repo.Blobs(ctx) configJSON, err = blobs.Get(ctx, dgst) if err != nil { return nil, err } // Verify image config digest verifier := dgst.Verifier() if _, err := verifier.Write(configJSON); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image config verification failed for digest %s", dgst) logrus.Error(err) return nil, err } return configJSON, nil } // schema2ManifestDigest computes the manifest digest, and, if pulling by // digest, ensures that it matches the requested digest. func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { _, canonical, err := mfst.Payload() if err != nil { return "", err } // If pull by digest, then verify the manifest digest. 
if digested, isDigested := ref.(reference.Canonical); isDigested { verifier := digested.Digest().Verifier() if _, err := verifier.Write(canonical); err != nil { return "", err } if !verifier.Verified() { err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) logrus.Error(err) return "", err } return digested.Digest(), nil } return digest.FromBytes(canonical), nil } func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) { // If pull by digest, then verify the manifest digest. NOTE: It is // important to do this first, before any other content validation. If the // digest cannot be verified, don't even bother with those other things. if digested, isCanonical := ref.(reference.Canonical); isCanonical { verifier := digested.Digest().Verifier() if _, err := verifier.Write(signedManifest.Canonical); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) logrus.Error(err) return nil, err } } m = &signedManifest.Manifest if m.SchemaVersion != 1 { return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) } if len(m.FSLayers) != len(m.History) { return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) } if len(m.FSLayers) == 0 { return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) } return m, nil } // fixManifestLayers removes repeated layers from the manifest and checks the // correctness of the parent chain. func fixManifestLayers(m *schema1.Manifest) error { imgs := make([]*image.V1Image, len(m.FSLayers)) for i := range m.FSLayers { img := &image.V1Image{} if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { return err } imgs[i] = img if err := v1.ValidateID(img.ID); err != nil { return err } } if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { // Windows base layer can point to a base layer parent that is not in manifest. return errors.New("invalid parent ID in the base layer of the image") } // check general duplicates to error instead of a deadlock idmap := make(map[string]struct{}) var lastID string for _, img := range imgs { // skip IDs that appear after each other, we handle those later if _, exists := idmap[img.ID]; img.ID != lastID && exists { return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) } lastID = img.ID idmap[lastID] = struct{}{} } // backwards loop so that we keep the remaining indexes after removing items for i := len(imgs) - 2; i >= 0; i-- { if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) m.History = append(m.History[:i], m.History[i+1:]...) } else if imgs[i].Parent != imgs[i+1].ID { return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) } } return nil } func createDownloadFile() (*os.File, error) { return os.CreateTemp("", "GetImageBlob") } func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform { return specs.Platform{ OS: p.OS, Architecture: p.Architecture, Variant: p.Variant, OSFeatures: p.OSFeatures, OSVersion: p.OSVersion, } }
package distribution // import "github.com/docker/docker/distribution" import ( "context" "encoding/json" "fmt" "io" "os" "runtime" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/distribution" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/ocischema" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" v1 "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) var ( errRootFSMismatch = errors.New("layers from manifest don't match image configuration") errRootFSInvalid = errors.New("invalid rootfs in image configuration") ) // ImageConfigPullError is an error pulling the image config blob // (only applies to schema2). type ImageConfigPullError struct { Err error } // Error returns the error string for ImageConfigPullError. func (e ImageConfigPullError) Error() string { return "error pulling image configuration: " + e.Err.Error() } type v2Puller struct { V2MetadataService metadata.V2MetadataService endpoint registry.APIEndpoint config *ImagePullConfig repoInfo *registry.RepositoryInfo repo distribution.Repository manifestStore *manifestStore } func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { // TODO(tiborvass): was ReceiveTimeout p.repo, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") if err != nil { logrus.Warnf("Error getting v2 registry: %v", err) return err } p.manifestStore.remote, err = p.repo.Manifests(ctx) if err != nil { return err } if err = p.pullV2Repository(ctx, ref); err != nil { if _, ok := err.(fallbackError); ok { return err } if continueOnError(err, p.endpoint.Mirror) { return fallbackError{ err: err, transportOK: true, } } } return err } func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { var layersDownloaded bool if !reference.IsNameOnly(ref) { layersDownloaded, err = p.pullV2Tag(ctx, ref, p.config.Platform) if err != nil { return err } } else { tags, err := p.repo.Tags(ctx).All(ctx) if err != nil { return err } for _, tag := range tags { tagRef, err := reference.WithTag(ref, tag) if err != nil { return err } pulledNew, err := p.pullV2Tag(ctx, tagRef, p.config.Platform) if err != nil { // Since this is the pull-all-tags case, don't // allow an error pulling a particular tag to // make the whole pull fall back to v1. if fallbackErr, ok := err.(fallbackError); ok { return fallbackErr.err } return err } // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
layersDownloaded = layersDownloaded || pulledNew } } writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) return nil } type v2LayerDescriptor struct { digest digest.Digest diffID layer.DiffID repoInfo *registry.RepositoryInfo repo distribution.Repository V2MetadataService metadata.V2MetadataService tmpFile *os.File verifier digest.Verifier src distribution.Descriptor } func (ld *v2LayerDescriptor) Key() string { return "v2:" + ld.digest.String() } func (ld *v2LayerDescriptor) ID() string { return stringid.TruncateID(ld.digest.String()) } func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { if ld.diffID != "" { return ld.diffID, nil } return ld.V2MetadataService.GetDiffID(ld.digest) } func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { logrus.Debugf("pulling blob %q", ld.digest) var ( err error offset int64 ) if ld.tmpFile == nil { ld.tmpFile, err = createDownloadFile() if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } else { offset, err = ld.tmpFile.Seek(0, io.SeekEnd) if err != nil { logrus.Debugf("error seeking to end of download file: %v", err) offset = 0 ld.tmpFile.Close() if err := os.Remove(ld.tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } ld.tmpFile, err = createDownloadFile() if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } else if offset != 0 { logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) } } tmpFile := ld.tmpFile layerDownload, err := ld.open(ctx) if err != nil { logrus.Errorf("Error initiating layer download: %v", err) return nil, 0, retryOnError(err) } if offset != 0 { _, err := layerDownload.Seek(offset, io.SeekStart) if err != nil { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } } size, err := layerDownload.Seek(0, io.SeekEnd) if err != nil { // Seek failed, perhaps because there was no Content-Length // header. This shouldn't fail the download, because we can // still continue without a progress bar. size = 0 } else { if size != 0 && offset > size { logrus.Debug("Partial download is larger than full blob. Starting over") offset = 0 if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } // Restore the seek offset either at the beginning of the // stream, or just after the last byte we have from previous // attempts. _, err = layerDownload.Seek(offset, io.SeekStart) if err != nil { return nil, 0, err } } reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") defer reader.Close() if ld.verifier == nil { ld.verifier = ld.digest.Verifier() } _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) if err != nil { if err == transport.ErrWrongCodeForByteRange { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } return nil, 0, retryOnError(err) } progress.Update(progressOutput, ld.ID(), "Verifying Checksum") if !ld.verifier.Verified() { err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) logrus.Error(err) // Allow a retry if this digest verification error happened // after a resumed download. 
if offset != 0 { if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, err } return nil, 0, xfer.DoNotRetry{Err: err} } progress.Update(progressOutput, ld.ID(), "Download complete") logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) _, err = tmpFile.Seek(0, io.SeekStart) if err != nil { tmpFile.Close() if err := os.Remove(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } ld.tmpFile = nil ld.verifier = nil return nil, 0, xfer.DoNotRetry{Err: err} } // hand off the temporary file to the download manager, so it will only // be closed once ld.tmpFile = nil return ioutils.NewReadCloserWrapper(tmpFile, func() error { tmpFile.Close() err := os.RemoveAll(tmpFile.Name()) if err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } return err }), size, nil } func (ld *v2LayerDescriptor) Close() { if ld.tmpFile != nil { ld.tmpFile.Close() if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } } } func (ld *v2LayerDescriptor) truncateDownloadFile() error { // Need a new hash context since we will be redoing the download ld.verifier = nil if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil { logrus.Errorf("error seeking to beginning of download file: %v", err) return err } if err := ld.tmpFile.Truncate(0); err != nil { logrus.Errorf("error truncating download file: %v", err) return err } return nil } func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { // Cache mapping from this layer's DiffID to the blobsum ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) } func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) { var ( tagOrDigest string // Used for logging/progress only dgst digest.Digest mt string size int64 tagged reference.NamedTagged isTagged bool ) if digested, isDigested := ref.(reference.Canonical); isDigested { dgst = digested.Digest() tagOrDigest = digested.String() } else if tagged, isTagged = ref.(reference.NamedTagged); isTagged { tagService := p.repo.Tags(ctx) desc, err := tagService.Get(ctx, tagged.Tag()) if err != nil { return false, err } dgst = desc.Digest tagOrDigest = tagged.Tag() mt = desc.MediaType size = desc.Size } else { return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) } ctx = log.WithLogger(ctx, logrus.WithFields( logrus.Fields{ "digest": dgst, "remote": ref, })) desc := specs.Descriptor{ MediaType: mt, Digest: dgst, Size: size, } manifest, err := p.manifestStore.Get(ctx, desc) if err != nil { if isTagged && isNotFound(errors.Cause(err)) { logrus.WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag") msg := `%s Failed to pull manifest by the resolved digest. This registry does not appear to conform to the distribution registry specification; falling back to pull by tag. This fallback is DEPRECATED, and will be removed in a future release. Please contact admins of %s. %s ` warnEmoji := "\U000026A0\U0000FE0F" progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji) // Fetch by tag worked, but fetch by digest didn't. // This is a broken registry implementation. // We'll fallback to the old behavior and get the manifest by tag. 
var ms distribution.ManifestService ms, err = p.repo.Manifests(ctx) if err != nil { return false, err } manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag())) err = errors.Wrap(err, "error after falling back to get manifest by tag") } if err != nil { return false, err } } if manifest == nil { return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) } if m, ok := manifest.(*schema2.DeserializedManifest); ok { var allowedMediatype bool for _, t := range p.config.Schema2Types { if m.Manifest.Config.MediaType == t { allowedMediatype = true break } } if !allowedMediatype { configClass := mediaTypeClasses[m.Manifest.Config.MediaType] if configClass == "" { configClass = "unknown" } return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass} } } logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) var ( id digest.Digest manifestDigest digest.Digest ) switch v := manifest.(type) { case *schema1.SignedManifest: if p.config.RequireSchema2 { return false, fmt.Errorf("invalid manifest: not schema2") } // give registries time to upgrade to schema2 and only warn if we know a registry has been upgraded long time ago // TODO: condition to be removed if reference.Domain(ref) == "docker.io" { msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) logrus.Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) } id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform) if err != nil { return false, err } case *schema2.DeserializedManifest: id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform) if err != nil { return false, err } case *ocischema.DeserializedManifest: id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform) if err != nil { return false, err } case *manifestlist.DeserializedManifestList: id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform) if err != nil { return false, err } default: return false, invalidManifestFormatError{} } progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) if p.config.ReferenceStore != nil { oldTagID, err := p.config.ReferenceStore.Get(ref) if err == nil { if oldTagID == id { return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) } } else if err != refstore.ErrDoesNotExist { return false, err } if canonical, ok := ref.(reference.Canonical); ok { if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { return false, err } } else { if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { return false, err } if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { return false, err } } } return true, nil } func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { if platform != nil { // Early bath if the requested OS doesn't match that of the configuration. // This avoids doing the download, only to potentially fail later. 
if !system.IsOSSupported(platform.OS) { return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", runtime.GOOS, platform.OS) } } var verifiedManifest *schema1.Manifest verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) if err != nil { return "", "", err } rootFS := image.NewRootFS() // remove duplicate layers and check parent chain validity err = fixManifestLayers(verifiedManifest) if err != nil { return "", "", err } var descriptors []xfer.DownloadDescriptor // Image history converted to the new format var history []image.History // Note that the order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { blobSum := verifiedManifest.FSLayers[i].BlobSum if err = blobSum.Validate(); err != nil { return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum) } var throwAway struct { ThrowAway bool `json:"throwaway,omitempty"` } if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { return "", "", err } h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) if err != nil { return "", "", err } history = append(history, h) if throwAway.ThrowAway { continue } layerDescriptor := &v2LayerDescriptor{ digest: blobSum, repoInfo: p.repoInfo, repo: p.repo, V2MetadataService: p.V2MetadataService, } descriptors = append(descriptors, layerDescriptor) } resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, runtime.GOOS, descriptors, p.config.ProgressOutput) if err != nil { return "", "", err } defer release() config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) if err != nil { return "", "", err } imageID, err := p.config.ImageStore.Put(ctx, config) if err != nil { return "", "", err } manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) return imageID, manifestDigest, nil } func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) { if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil { // If the image already exists locally, no need to pull // anything. return target.Digest, nil } var descriptors []xfer.DownloadDescriptor // Note that the order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. 
for _, d := range layers { if err := d.Digest.Validate(); err != nil { return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest) } layerDescriptor := &v2LayerDescriptor{ digest: d.Digest, repo: p.repo, repoInfo: p.repoInfo, V2MetadataService: p.V2MetadataService, src: d, } descriptors = append(descriptors, layerDescriptor) } configChan := make(chan []byte, 1) configErrChan := make(chan error, 1) layerErrChan := make(chan error, 1) downloadsDone := make(chan struct{}) var cancel func() ctx, cancel = context.WithCancel(ctx) defer cancel() // Pull the image config go func() { configJSON, err := p.pullSchema2Config(ctx, target.Digest) if err != nil { configErrChan <- ImageConfigPullError{Err: err} cancel() return } configChan <- configJSON }() var ( configJSON []byte // raw serialized image config downloadedRootFS *image.RootFS // rootFS from registered layers configRootFS *image.RootFS // rootFS from configuration release func() // release resources from rootFS download configPlatform *specs.Platform // for LCOW when registering downloaded layers ) layerStoreOS := runtime.GOOS if platform != nil { layerStoreOS = platform.OS } // https://github.com/docker/docker/issues/24766 - Err on the side of caution, // explicitly blocking images intended for linux from the Windows daemon. On // Windows, we do this before the attempt to download, effectively serialising // the download slightly slowing it down. We have to do it this way, as // chances are the download of layers itself would fail due to file names // which aren't suitable for NTFS. At some point in the future, if a similar // check to block Windows images being pulled on Linux is implemented, it // may be necessary to perform the same type of serialisation. if runtime.GOOS == "windows" { configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err != nil { return "", err } if configRootFS == nil { return "", errRootFSInvalid } if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil { return "", err } if len(descriptors) != len(configRootFS.DiffIDs) { return "", errRootFSMismatch } if platform == nil { // Early bath if the requested OS doesn't match that of the configuration. // This avoids doing the download, only to potentially fail later. 
if !system.IsOSSupported(configPlatform.OS) { return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS) } layerStoreOS = configPlatform.OS } // Populate diff ids in descriptors to avoid downloading foreign layers // which have been side loaded for i := range descriptors { descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] } } if p.config.DownloadManager != nil { go func() { var ( err error rootFS image.RootFS ) downloadRootFS := *image.NewRootFS() rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput) if err != nil { // Intentionally do not cancel the config download here // as the error from config download (if there is one) // is more interesting than the layer download error layerErrChan <- err return } downloadedRootFS = &rootFS close(downloadsDone) }() } else { // We have nothing to download close(downloadsDone) } if configJSON == nil { configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err == nil && configRootFS == nil { err = errRootFSInvalid } if err != nil { cancel() select { case <-downloadsDone: case <-layerErrChan: } return "", err } } select { case <-downloadsDone: case err = <-layerErrChan: return "", err } if release != nil { defer release() } if downloadedRootFS != nil { // The DiffIDs returned in rootFS MUST match those in the config. // Otherwise the image config could be referencing layers that aren't // included in the manifest. if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { return "", errRootFSMismatch } for i := range downloadedRootFS.DiffIDs { if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { return "", errRootFSMismatch } } } imageID, err := p.config.ImageStore.Put(ctx, configJSON) if err != nil { return "", err } return imageID, nil } func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err } id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) return id, manifestDigest, err } func (p *v2Puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err } id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) return id, manifestDigest, err } func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) { select { case configJSON := <-configChan: rootfs, err := s.RootFSFromConfig(configJSON) if err != nil { return nil, nil, nil, err } platform, err := s.PlatformFromConfig(configJSON) if err != nil { return nil, nil, nil, err } return configJSON, rootfs, platform, nil case err := <-errChan: return nil, nil, nil, err // Don't need a case for ctx.Done in the select because cancellation // will trigger an error in p.pullSchema2ImageConfig. } } // pullManifestList handles "manifest lists" which point to various // platform-specific manifests. 
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *specs.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) { manifestListDigest, err = schema2ManifestDigest(ref, mfstList) if err != nil { return "", "", err } var platform specs.Platform if pp != nil { platform = *pp } logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), platforms.Format(platform), runtime.GOARCH) manifestMatches := filterManifests(mfstList.Manifests, platform) if len(manifestMatches) == 0 { errMsg := fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(platform)) logrus.Debugf(errMsg) return "", "", errors.New(errMsg) } if len(manifestMatches) > 1 { logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String()) } match := manifestMatches[0] if err := checkImageCompatibility(match.Platform.OS, match.Platform.OSVersion); err != nil { return "", "", err } desc := specs.Descriptor{ Digest: match.Digest, Size: match.Size, MediaType: match.MediaType, } manifest, err := p.manifestStore.Get(ctx, desc) if err != nil { return "", "", err } manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), match.Digest) if err != nil { return "", "", err } switch v := manifest.(type) { case *schema1.SignedManifest: msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) logrus.Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } case *schema2.DeserializedManifest: platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullSchema2(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } case *ocischema.DeserializedManifest: platform := toOCIPlatform(manifestMatches[0].Platform) id, _, err = p.pullOCI(ctx, manifestRef, v, &platform) if err != nil { return "", "", err } default: return "", "", errors.New("unsupported manifest format") } return id, manifestListDigest, err } func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { blobs := p.repo.Blobs(ctx) configJSON, err = blobs.Get(ctx, dgst) if err != nil { return nil, err } // Verify image config digest verifier := dgst.Verifier() if _, err := verifier.Write(configJSON); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image config verification failed for digest %s", dgst) logrus.Error(err) return nil, err } return configJSON, nil } // schema2ManifestDigest computes the manifest digest, and, if pulling by // digest, ensures that it matches the requested digest. func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { _, canonical, err := mfst.Payload() if err != nil { return "", err } // If pull by digest, then verify the manifest digest. 
if digested, isDigested := ref.(reference.Canonical); isDigested { verifier := digested.Digest().Verifier() if _, err := verifier.Write(canonical); err != nil { return "", err } if !verifier.Verified() { err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) logrus.Error(err) return "", err } return digested.Digest(), nil } return digest.FromBytes(canonical), nil } func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) { // If pull by digest, then verify the manifest digest. NOTE: It is // important to do this first, before any other content validation. If the // digest cannot be verified, don't even bother with those other things. if digested, isCanonical := ref.(reference.Canonical); isCanonical { verifier := digested.Digest().Verifier() if _, err := verifier.Write(signedManifest.Canonical); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) logrus.Error(err) return nil, err } } m = &signedManifest.Manifest if m.SchemaVersion != 1 { return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) } if len(m.FSLayers) != len(m.History) { return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) } if len(m.FSLayers) == 0 { return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) } return m, nil } // fixManifestLayers removes repeated layers from the manifest and checks the // correctness of the parent chain. func fixManifestLayers(m *schema1.Manifest) error { imgs := make([]*image.V1Image, len(m.FSLayers)) for i := range m.FSLayers { img := &image.V1Image{} if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { return err } imgs[i] = img if err := v1.ValidateID(img.ID); err != nil { return err } } if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { // Windows base layer can point to a base layer parent that is not in manifest. return errors.New("invalid parent ID in the base layer of the image") } // check general duplicates to error instead of a deadlock idmap := make(map[string]struct{}) var lastID string for _, img := range imgs { // skip IDs that appear after each other, we handle those later if _, exists := idmap[img.ID]; img.ID != lastID && exists { return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) } lastID = img.ID idmap[lastID] = struct{}{} } // backwards loop so that we keep the remaining indexes after removing items for i := len(imgs) - 2; i >= 0; i-- { if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) m.History = append(m.History[:i], m.History[i+1:]...) } else if imgs[i].Parent != imgs[i+1].ID { return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) } } return nil } func createDownloadFile() (*os.File, error) { return os.CreateTemp("", "GetImageBlob") } func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform { return specs.Platform{ OS: p.OS, Architecture: p.Architecture, Variant: p.Variant, OSFeatures: p.OSFeatures, OSVersion: p.OSVersion, } }
dkkb
fecf45b09add4f6289ba1fcec67d6993b04db7af
8684f482e4cb84fee269d8e91176ca8e1afcc218
Yes, I saw this function as well, but I wasn't sure whether it had other usages, so I didn't modify it. Both pullV2Repository and its tests have now been changed.
dkkb
4,486
moby/moby
42,779
hack/vendor.sh: allow go version to be specified with .0
Golang '.0' releases are published without the trailing .0 (i.e. go1.17 is equal to go1.17.0). For the base image, we want to specify the Go version including the patch release (golang:1.17 is equivalent to go1.17.x), so adjust the script to also accept the trailing .0, because otherwise the download URL is not found: hack/vendor.sh archive/tar update vendored copy of archive/tar downloading: https://golang.org/dl/go1.17.0.src.tar.gz curl: (22) The requested URL returned error: 404 **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-08-23 14:57:48+00:00
2021-08-23 17:25:35+00:00
hack/vendor.sh
#!/usr/bin/env bash # This file is just wrapper around vndr (github.com/LK4D4/vndr) tool. # For updating dependencies you should change `vendor.conf` file in root of the # project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for # vndr usage. set -e if ! hash vndr; then echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH" exit 1 fi if [ $# -eq 0 ] || [ "$1" = "archive/tar" ]; then echo "update vendored copy of archive/tar" : "${GO_VERSION:=$(awk -F '[ =]' '$1 == "ARG" && $2 == "GO_VERSION" { print $3; exit }' ./Dockerfile)}" rm -rf vendor/archive mkdir -p ./vendor/archive/tar echo "downloading: https://golang.org/dl/go${GO_VERSION}.src.tar.gz" curl -fsSL "https://golang.org/dl/go${GO_VERSION}.src.tar.gz" \ | tar --extract --gzip --directory=vendor/archive/tar --strip-components=4 go/src/archive/tar patch --strip=4 --directory=vendor/archive/tar --input="$PWD/patches/0001-archive-tar-do-not-populate-user-group-names.patch" fi if [ $# -eq 0 ] || [ "$1" != "archive/tar" ]; then vndr -whitelist='^archive[/\\]tar' "$@" fi
#!/usr/bin/env bash # This file is just wrapper around vndr (github.com/LK4D4/vndr) tool. # For updating dependencies you should change `vendor.conf` file in root of the # project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for # vndr usage. set -e if ! hash vndr; then echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH" exit 1 fi if [ $# -eq 0 ] || [ "$1" = "archive/tar" ]; then echo "update vendored copy of archive/tar" : "${GO_VERSION:=$(awk -F '[ =]' '$1 == "ARG" && $2 == "GO_VERSION" { print $3; exit }' ./Dockerfile)}" rm -rf vendor/archive mkdir -p ./vendor/archive/tar echo "downloading: https://golang.org/dl/go${GO_VERSION%.0}.src.tar.gz" curl -fsSL "https://golang.org/dl/go${GO_VERSION%.0}.src.tar.gz" \ | tar --extract --gzip --directory=vendor/archive/tar --strip-components=4 go/src/archive/tar patch --strip=4 --directory=vendor/archive/tar --input="$PWD/patches/0001-archive-tar-do-not-populate-user-group-names.patch" fi if [ $# -eq 0 ] || [ "$1" != "archive/tar" ]; then vndr -whitelist='^archive[/\\]tar' "$@" fi
thaJeztah
8c8e4e3271e05248a3a11c6165aa1e6229295446
ab1559c97c3faa9c9a64539c3bb0f4a36c795488
We probably want this to be specific to just the trailing `.0` so we don't drop patch releases, right? ```suggestion echo "downloading: https://golang.org/dl/go${GO_VERSION%.0}.src.tar.gz" curl -fsSL "https://golang.org/dl/go${GO_VERSION%.0}.src.tar.gz" \ ```
tianon
4,487
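The `${GO_VERSION%.0}` expansion suggested in the record above removes only the shortest match of the literal pattern `.0` from the end of the value, so an exact trailing `.0` is dropped while genuine patch releases such as `1.17.1` are kept. A minimal Go sketch of the same trimming idea (purely illustrative; the `trimGoVersion` helper is hypothetical and not part of the actual script, which is shell) could look like this:

```go
package main

import (
	"fmt"
	"strings"
)

// trimGoVersion mirrors the shell expansion ${GO_VERSION%.0}: it removes a
// trailing ".0" if present and leaves every other version string unchanged.
func trimGoVersion(v string) string {
	return strings.TrimSuffix(v, ".0")
}

func main() {
	for _, v := range []string{"1.17.0", "1.17", "1.17.1"} {
		// Expected output: 1.17.0 -> 1.17, 1.17 -> 1.17, 1.17.1 -> 1.17.1
		fmt.Printf("%s -> %s\n", v, trimGoVersion(v))
	}
}
```

Being this specific is the point raised in the review comment: a blunter strip, such as removing everything after the last dot, would also drop real patch versions like `1.17.1`, which is exactly what the suggestion avoids.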
moby/moby
42,779
hack/vendor.sh: allow go version to be specified with .0
Golang '.0' releases are published without the trailing .0 (i.e. go1.17 is equal to go1.17.0). For the base image, we want to specify the Go version including the patch release (golang:1.17 is equivalent to go1.17.x), so adjust the script to also accept the trailing .0, because otherwise the download URL is not found: hack/vendor.sh archive/tar update vendored copy of archive/tar downloading: https://golang.org/dl/go1.17.0.src.tar.gz curl: (22) The requested URL returned error: 404 **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-08-23 14:57:48+00:00
2021-08-23 17:25:35+00:00
hack/vendor.sh
#!/usr/bin/env bash # This file is just wrapper around vndr (github.com/LK4D4/vndr) tool. # For updating dependencies you should change `vendor.conf` file in root of the # project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for # vndr usage. set -e if ! hash vndr; then echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH" exit 1 fi if [ $# -eq 0 ] || [ "$1" = "archive/tar" ]; then echo "update vendored copy of archive/tar" : "${GO_VERSION:=$(awk -F '[ =]' '$1 == "ARG" && $2 == "GO_VERSION" { print $3; exit }' ./Dockerfile)}" rm -rf vendor/archive mkdir -p ./vendor/archive/tar echo "downloading: https://golang.org/dl/go${GO_VERSION}.src.tar.gz" curl -fsSL "https://golang.org/dl/go${GO_VERSION}.src.tar.gz" \ | tar --extract --gzip --directory=vendor/archive/tar --strip-components=4 go/src/archive/tar patch --strip=4 --directory=vendor/archive/tar --input="$PWD/patches/0001-archive-tar-do-not-populate-user-group-names.patch" fi if [ $# -eq 0 ] || [ "$1" != "archive/tar" ]; then vndr -whitelist='^archive[/\\]tar' "$@" fi
#!/usr/bin/env bash # This file is just wrapper around vndr (github.com/LK4D4/vndr) tool. # For updating dependencies you should change `vendor.conf` file in root of the # project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for # vndr usage. set -e if ! hash vndr; then echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH" exit 1 fi if [ $# -eq 0 ] || [ "$1" = "archive/tar" ]; then echo "update vendored copy of archive/tar" : "${GO_VERSION:=$(awk -F '[ =]' '$1 == "ARG" && $2 == "GO_VERSION" { print $3; exit }' ./Dockerfile)}" rm -rf vendor/archive mkdir -p ./vendor/archive/tar echo "downloading: https://golang.org/dl/go${GO_VERSION%.0}.src.tar.gz" curl -fsSL "https://golang.org/dl/go${GO_VERSION%.0}.src.tar.gz" \ | tar --extract --gzip --directory=vendor/archive/tar --strip-components=4 go/src/archive/tar patch --strip=4 --directory=vendor/archive/tar --input="$PWD/patches/0001-archive-tar-do-not-populate-user-group-names.patch" fi if [ $# -eq 0 ] || [ "$1" != "archive/tar" ]; then vndr -whitelist='^archive[/\\]tar' "$@" fi
thaJeztah
8c8e4e3271e05248a3a11c6165aa1e6229295446
ab1559c97c3faa9c9a64539c3bb0f4a36c795488
Ah, thanks! I always struggle with these and forget the correct syntax, LOL. Yup, that was my intent.
thaJeztah
4,488
moby/moby
42,776
info: remove "expected" check for tini version
These checks were added when we required a specific version of containerd and runc (different versions were known to be incompatible). I don't think we had a similar requirement for tini, so this check was redundant. Let's remove the check altogether. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-08-23 11:26:19+00:00
2021-08-23 17:40:09+00:00
daemon/info_unix.go
// +build !windows package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "os/exec" "path/filepath" "strings" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // fillPlatformInfo fills the platform related info. func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { v.CgroupDriver = daemon.getCgroupDriver() v.CgroupVersion = "1" if sysInfo.CgroupUnified { v.CgroupVersion = "2" } v.MemoryLimit = sysInfo.MemoryLimit v.SwapLimit = sysInfo.SwapLimit v.KernelMemory = sysInfo.KernelMemory v.KernelMemoryTCP = sysInfo.KernelMemoryTCP v.OomKillDisable = sysInfo.OomKillDisable v.CPUCfsPeriod = sysInfo.CPUCfs v.CPUCfsQuota = sysInfo.CPUCfs v.CPUShares = sysInfo.CPUShares v.CPUSet = sysInfo.Cpuset v.PidsLimit = sysInfo.PidsLimit v.Runtimes = daemon.configStore.GetAllRuntimes() v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() v.InitBinary = daemon.configStore.GetInitPath() defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) v.RuncCommit.ID = "N/A" } else { v.RuncCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) v.RuncCommit.ID = "N/A" } // runc is now shipped as a separate package. Set "expected" to same value // as "ID" to prevent clients from reporting a version-mismatch v.RuncCommit.Expected = v.RuncCommit.ID if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.ContainerdCommit.ID = rv.Revision } else { logrus.Warnf("failed to retrieve containerd version: %v", err) v.ContainerdCommit.ID = "N/A" } // containerd is now shipped as a separate package. Set "expected" to same // value as "ID" to prevent clients from reporting a version-mismatch v.ContainerdCommit.Expected = v.ContainerdCommit.ID // TODO is there still a need to check the expected version for tini? // if not, we can change this, and just set "Expected" to v.InitCommit.ID v.InitCommit.Expected = dockerversion.InitCommitID defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if _, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) v.InitCommit.ID = "N/A" } else { v.InitCommit.ID = commit if len(dockerversion.InitCommitID) > len(commit) { v.InitCommit.Expected = dockerversion.InitCommitID[0:len(commit)] } } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) v.InitCommit.ID = "N/A" } if v.CgroupDriver == cgroupNoneDriver { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.") } else { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. 
To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.") } } else { if !v.MemoryLimit { v.Warnings = append(v.Warnings, "WARNING: No memory limit support") } if !v.SwapLimit { v.Warnings = append(v.Warnings, "WARNING: No swap limit support") } if !v.KernelMemoryTCP && v.CgroupVersion == "1" { // kernel memory is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support") } if !v.OomKillDisable && v.CgroupVersion == "1" { // oom kill disable is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support") } if !v.CPUCfsQuota { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support") } if !v.CPUCfsPeriod { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support") } if !v.CPUShares { v.Warnings = append(v.Warnings, "WARNING: No cpu shares support") } if !v.CPUSet { v.Warnings = append(v.Warnings, "WARNING: No cpuset support") } // TODO add fields for these options in types.Info if !sysInfo.BlkioWeight && v.CgroupVersion == "2" { // blkio weight is not available on cgroup v1 since kernel 5.0. // Warning is not printed on cgroup v1, because there is no action user can take. // On cgroup v2, blkio weight is implemented using io.weight v.Warnings = append(v.Warnings, "WARNING: No io.weight support") } if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support") } if !sysInfo.BlkioReadBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support") } } if !sysInfo.BlkioWriteBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support") } } if !sysInfo.BlkioReadIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support") } } if !sysInfo.BlkioWriteIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support") } } } if !v.IPv4Forwarding { v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled") } if !v.BridgeNfIptables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled") } if !v.BridgeNfIP6tables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled") } } func (daemon *Daemon) fillPlatformVersion(v *types.Version) { if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.Components = append(v.Components, types.ComponentVersion{ Name: "containerd", Version: rv.Version, Details: map[string]string{ "GitCommit": rv.Revision, }, }) } defaultRuntime := daemon.configStore.GetDefaultRuntimeName() defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", 
defaultRuntimeBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: defaultRuntime, Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if ver, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: filepath.Base(defaultInitBinary), Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } } func fillDriverWarnings(v *types.Info) { for _, pair := range v.DriverStatus { if pair[0] == "Data loop file" { msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+ "strongly discouraged for production use.\n "+ "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver) v.Warnings = append(v.Warnings, msg) continue } if pair[0] == "Supports d_type" && pair[1] == "false" { backingFs := getBackingFs(v) msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs) if backingFs == "xfs" { msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" } msg += " Running without d_type support will not be supported in future releases." v.Warnings = append(v.Warnings, msg) continue } } } func getBackingFs(v *types.Info) string { for _, pair := range v.DriverStatus { if pair[0] == "Backing Filesystem" { return pair[1] } } return "" } // parseInitVersion parses a Tini version string, and extracts the "version" // and "git commit" from the output. // // Output example from `docker-init --version`: // // tini version 0.18.0 - git.fec3683 func parseInitVersion(v string) (version string, commit string, err error) { parts := strings.Split(v, " - ") if len(parts) >= 2 { gitParts := strings.Split(strings.TrimSpace(parts[1]), ".") if len(gitParts) == 2 && gitParts[0] == "git" { commit = gitParts[1] } } parts[0] = strings.TrimSpace(parts[0]) if strings.HasPrefix(parts[0], "tini version ") { version = strings.TrimPrefix(parts[0], "tini version ") } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return version, commit, err } // parseRuntimeVersion parses the output of `[runtime] --version` and extracts the // "name", "version" and "git commit" from the output. 
// // Output example from `runc --version`: // // runc version 1.0.0-rc5+dev // commit: 69663f0bd4b60df09991c08812a60108003fa340 // spec: 1.0.0 func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) { lines := strings.Split(strings.TrimSpace(v), "\n") for _, line := range lines { if strings.Contains(line, "version") { s := strings.Split(line, "version") runtime = strings.TrimSpace(s[0]) version = strings.TrimSpace(s[len(s)-1]) continue } if strings.HasPrefix(line, "commit:") { commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:")) continue } } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return runtime, version, commit, err } func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool { return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate() } // Rootless returns true if daemon is running in rootless mode func (daemon *Daemon) Rootless() bool { return daemon.configStore.Rootless }
// +build !windows package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "os/exec" "path/filepath" "strings" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // fillPlatformInfo fills the platform related info. func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { v.CgroupDriver = daemon.getCgroupDriver() v.CgroupVersion = "1" if sysInfo.CgroupUnified { v.CgroupVersion = "2" } v.MemoryLimit = sysInfo.MemoryLimit v.SwapLimit = sysInfo.SwapLimit v.KernelMemory = sysInfo.KernelMemory v.KernelMemoryTCP = sysInfo.KernelMemoryTCP v.OomKillDisable = sysInfo.OomKillDisable v.CPUCfsPeriod = sysInfo.CPUCfs v.CPUCfsQuota = sysInfo.CPUCfs v.CPUShares = sysInfo.CPUShares v.CPUSet = sysInfo.Cpuset v.PidsLimit = sysInfo.PidsLimit v.Runtimes = daemon.configStore.GetAllRuntimes() v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() v.InitBinary = daemon.configStore.GetInitPath() v.RuncCommit.ID = "N/A" v.ContainerdCommit.ID = "N/A" v.InitCommit.ID = "N/A" defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.RuncCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.ContainerdCommit.ID = rv.Revision } else { logrus.Warnf("failed to retrieve containerd version: %v", err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if _, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.InitCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } // Set expected and actual commits to the same value to prevent the client // showing that the version does not match the "expected" version/commit. v.RuncCommit.Expected = v.RuncCommit.ID v.ContainerdCommit.Expected = v.ContainerdCommit.ID v.InitCommit.Expected = v.InitCommit.ID if v.CgroupDriver == cgroupNoneDriver { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.") } else { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.") } } else { if !v.MemoryLimit { v.Warnings = append(v.Warnings, "WARNING: No memory limit support") } if !v.SwapLimit { v.Warnings = append(v.Warnings, "WARNING: No swap limit support") } if !v.KernelMemoryTCP && v.CgroupVersion == "1" { // kernel memory is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support") } if !v.OomKillDisable && v.CgroupVersion == "1" { // oom kill disable is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. 
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support") } if !v.CPUCfsQuota { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support") } if !v.CPUCfsPeriod { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support") } if !v.CPUShares { v.Warnings = append(v.Warnings, "WARNING: No cpu shares support") } if !v.CPUSet { v.Warnings = append(v.Warnings, "WARNING: No cpuset support") } // TODO add fields for these options in types.Info if !sysInfo.BlkioWeight && v.CgroupVersion == "2" { // blkio weight is not available on cgroup v1 since kernel 5.0. // Warning is not printed on cgroup v1, because there is no action user can take. // On cgroup v2, blkio weight is implemented using io.weight v.Warnings = append(v.Warnings, "WARNING: No io.weight support") } if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support") } if !sysInfo.BlkioReadBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support") } } if !sysInfo.BlkioWriteBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support") } } if !sysInfo.BlkioReadIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support") } } if !sysInfo.BlkioWriteIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support") } } } if !v.IPv4Forwarding { v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled") } if !v.BridgeNfIptables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled") } if !v.BridgeNfIP6tables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled") } } func (daemon *Daemon) fillPlatformVersion(v *types.Version) { if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.Components = append(v.Components, types.ComponentVersion{ Name: "containerd", Version: rv.Version, Details: map[string]string{ "GitCommit": rv.Revision, }, }) } defaultRuntime := daemon.configStore.GetDefaultRuntimeName() defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: defaultRuntime, Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if ver, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: filepath.Base(defaultInitBinary), Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } 
else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } } func fillDriverWarnings(v *types.Info) { for _, pair := range v.DriverStatus { if pair[0] == "Data loop file" { msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+ "strongly discouraged for production use.\n "+ "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver) v.Warnings = append(v.Warnings, msg) continue } if pair[0] == "Supports d_type" && pair[1] == "false" { backingFs := getBackingFs(v) msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs) if backingFs == "xfs" { msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" } msg += " Running without d_type support will not be supported in future releases." v.Warnings = append(v.Warnings, msg) continue } } } func getBackingFs(v *types.Info) string { for _, pair := range v.DriverStatus { if pair[0] == "Backing Filesystem" { return pair[1] } } return "" } // parseInitVersion parses a Tini version string, and extracts the "version" // and "git commit" from the output. // // Output example from `docker-init --version`: // // tini version 0.18.0 - git.fec3683 func parseInitVersion(v string) (version string, commit string, err error) { parts := strings.Split(v, " - ") if len(parts) >= 2 { gitParts := strings.Split(strings.TrimSpace(parts[1]), ".") if len(gitParts) == 2 && gitParts[0] == "git" { commit = gitParts[1] } } parts[0] = strings.TrimSpace(parts[0]) if strings.HasPrefix(parts[0], "tini version ") { version = strings.TrimPrefix(parts[0], "tini version ") } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return version, commit, err } // parseRuntimeVersion parses the output of `[runtime] --version` and extracts the // "name", "version" and "git commit" from the output. // // Output example from `runc --version`: // // runc version 1.0.0-rc5+dev // commit: 69663f0bd4b60df09991c08812a60108003fa340 // spec: 1.0.0 func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) { lines := strings.Split(strings.TrimSpace(v), "\n") for _, line := range lines { if strings.Contains(line, "version") { s := strings.Split(line, "version") runtime = strings.TrimSpace(s[0]) version = strings.TrimSpace(s[len(s)-1]) continue } if strings.HasPrefix(line, "commit:") { commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:")) continue } } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return runtime, version, commit, err } func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool { return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate() } // Rootless returns true if daemon is running in rootless mode func (daemon *Daemon) Rootless() bool { return daemon.configStore.Rootless }
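For reference, here is a minimal, self-contained sketch of how the documented `docker-init --version` output (e.g. `tini version 0.18.0 - git.fec3683`) can be parsed. It is an illustrative re-implementation, not the daemon's `parseInitVersion` helper shown in the file contents above; the function name `parseTiniVersion` is hypothetical.

```go
package main

import (
	"fmt"
	"strings"
)

// parseTiniVersion is an illustrative re-implementation (not the daemon's
// parseInitVersion helper) of parsing `docker-init --version` output such as:
//
//	tini version 0.18.0 - git.fec3683
func parseTiniVersion(out string) (version, commit string) {
	parts := strings.SplitN(strings.TrimSpace(out), " - ", 2)
	version = strings.TrimPrefix(parts[0], "tini version ")
	if len(parts) == 2 {
		commit = strings.TrimPrefix(parts[1], "git.")
	}
	return version, commit
}

func main() {
	v, c := parseTiniVersion("tini version 0.18.0 - git.fec3683\n")
	fmt.Printf("version=%s commit=%s\n", v, c) // version=0.18.0 commit=fec3683
}
```

Running it prints `version=0.18.0 commit=fec3683`, which corresponds to the version/commit pair the daemon surfaces in its component list.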
thaJeztah
2740726ce1ed35bcbb8a364fddf6687aaeca8fda
b8b5fd63f1f0d0a1bc64d4413cbf00aa6c350e88
Bit on the fence if we need to keep the `N/A` value on the server-side, or if we should move this to the cli. I'd love to move it there, but will have to think if there could be a breaking change because of that. I might still do so in a follow-up though 😅
thaJeztah
4,489
moby/moby
42,770
api/types/events: add "Type" type for event-type enum
Currently just an alias for string, but we can change it to be an actual type.
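To illustrate the alias-versus-defined-type distinction this description refers to, here is a minimal Go sketch; the names `AliasType` and `DefinedType` are hypothetical and not part of the moby API.

```go
package main

import "fmt"

// AliasType behaves exactly like string: no new type identity is created.
type AliasType = string

// DefinedType is a distinct type whose underlying type is string.
type DefinedType string

func printAlias(t AliasType) { fmt.Println(t) }

func printDefined(t DefinedType) { fmt.Println(t) }

func main() {
	s := "container"

	printAlias(s) // compiles: AliasType is just another name for string

	// printDefined(s) // would not compile: string is not DefinedType
	printDefined(DefinedType(s)) // an explicit conversion is required
	printDefined("image")        // untyped string constants still convert implicitly
}
```

Keeping `Type` as an alias (as the patch below does with `type Type = string`) means existing callers that pass plain strings keep compiling; switching to a defined type later would require explicit conversions at those call sites.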
null
2021-08-21 22:39:14+00:00
2021-08-27 15:23:56+00:00
api/types/events/events.go
package events // import "github.com/docker/docker/api/types/events" const ( // BuilderEventType is the event type that the builder generates BuilderEventType = "builder" // ContainerEventType is the event type that containers generate ContainerEventType = "container" // DaemonEventType is the event type that daemon generate DaemonEventType = "daemon" // ImageEventType is the event type that images generate ImageEventType = "image" // NetworkEventType is the event type that networks generate NetworkEventType = "network" // PluginEventType is the event type that plugins generate PluginEventType = "plugin" // VolumeEventType is the event type that volumes generate VolumeEventType = "volume" // ServiceEventType is the event type that services generate ServiceEventType = "service" // NodeEventType is the event type that nodes generate NodeEventType = "node" // SecretEventType is the event type that secrets generate SecretEventType = "secret" // ConfigEventType is the event type that configs generate ConfigEventType = "config" ) // Actor describes something that generates events, // like a container, or a network, or a volume. // It has a defined name and a set or attributes. // The container attributes are its labels, other actors // can generate these attributes from other properties. type Actor struct { ID string Attributes map[string]string } // Message represents the information an event contains type Message struct { // Deprecated information from JSONMessage. // With data only in container events. Status string `json:"status,omitempty"` ID string `json:"id,omitempty"` From string `json:"from,omitempty"` Type string Action string Actor Actor // Engine events are local scope. Cluster events are swarm scope. Scope string `json:"scope,omitempty"` Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` }
package events // import "github.com/docker/docker/api/types/events" // Type is used for event-types. type Type = string // List of known event types. const ( BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate. ImageEventType Type = "image" // ImageEventType is the event type that images generate. NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. ) // Actor describes something that generates events, // like a container, or a network, or a volume. // It has a defined name and a set of attributes. // The container attributes are its labels, other actors // can generate these attributes from other properties. type Actor struct { ID string Attributes map[string]string } // Message represents the information an event contains type Message struct { // Deprecated information from JSONMessage. // With data only in container events. Status string `json:"status,omitempty"` // Deprecated: use Action instead. ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. Type Type Action string Actor Actor // Engine events are local scope. Cluster events are swarm scope. Scope string `json:"scope,omitempty"` Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` }
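A short usage sketch of the typed constants introduced above, assuming the package is importable as `github.com/docker/docker/api/types/events`; the `keepContainerEvents` helper is hypothetical, added only to show filtering against the constants rather than bare string literals.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/events"
)

// keepContainerEvents is a hypothetical helper that filters messages by the
// typed ContainerEventType constant instead of a bare "container" literal.
func keepContainerEvents(msgs []events.Message) []events.Message {
	var out []events.Message
	for _, m := range msgs {
		if m.Type == events.ContainerEventType {
			out = append(out, m)
		}
	}
	return out
}

func main() {
	msgs := []events.Message{
		{Type: events.ContainerEventType, Action: "start"},
		{Type: events.ImageEventType, Action: "pull"},
	}
	fmt.Println(len(keepContainerEvents(msgs))) // 1
}
```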
thaJeztah
9e7bbdb9ba53440cfc67e2e1de89e35e44c43ddf
0cd1bd42b405b6aab12d2375d85919a73e65e7c9
Hm... I should've sorted this list while I was changing this. Let me get that sorted (pun intended) before we merge this 😅
thaJeztah
4,490
moby/moby
42,769
swagger: assorted fixes and updates
includes some follow-ups to https://github.com/moby/moby/pull/42621 - api/swagger: fix up event-types and move to definitions - api/swagger: rename PluginPrivilegeItem to PluginPrivilege - api/swagger: move DistributionInspect to definitions
null
2021-08-21 22:31:49+00:00
2021-09-02 21:23:49+00:00
api/swagger.yaml
# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. # # This is used for generating API documentation and the types used by the # client/server. See api/README.md for more information. # # Some style notes: # - This file is used by ReDoc, which allows GitHub Flavored Markdown in # descriptions. # - There is no maximum line length, for ease of editing and pretty diffs. # - operationIds are in the format "NounVerb", with a singular noun. swagger: "2.0" schemes: - "http" - "https" produces: - "application/json" - "text/plain" consumes: - "application/json" - "text/plain" basePath: "/v1.42" info: title: "Docker Engine API" version: "1.42" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { "message": "page not found" } ``` # Versioning The API is usually changed in each release, so API calls are versioned to ensure that clients don't break. To lock to a specific version of the API, you prefix the URL with its version, for example, call `/v1.30/info` to use the v1.30 version of the `/info` endpoint. If the API version specified in the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. If you omit the version-prefix, the current version of the API (v1.42) is used. For example, calling `/info` is the same as calling `/v1.42/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer daemons. # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) (JSON) string with the following structure: ``` { "username": "string", "password": "string", "email": "string", "serveraddress": "string" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { "identitytoken": "9cbaf023786cd7..." } ``` # The tags on paths define the menu sections in the ReDoc documentation, so # the usage of tags must make sense for that: # - They should be singular, not plural. # - There should not be too many tags, or the menu becomes unwieldy. 
For # example, it is preferable to add a path to the "System" tag instead of # creating a tag with a single path in it. # - The order of tags in this list defines the order in the menu. tags: # Primary objects - name: "Container" x-displayName: "Containers" description: | Create and manage containers. - name: "Image" x-displayName: "Images" - name: "Network" x-displayName: "Networks" description: | Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/network/) for more information. - name: "Volume" x-displayName: "Volumes" description: | Create and manage persistent storage that can be attached to containers. - name: "Exec" x-displayName: "Exec" description: | Run new commands inside running containers. Refer to the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. # Swarm things - name: "Swarm" x-displayName: "Swarm" description: | Engines can be clustered together in a swarm. Refer to the [swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - name: "Node" x-displayName: "Nodes" description: | Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Service" x-displayName: "Services" description: | Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Task" x-displayName: "Tasks" description: | A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - name: "Secret" x-displayName: "Secrets" description: | Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - name: "Config" x-displayName: "Configs" description: | Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. 
# System things - name: "Plugin" x-displayName: "Plugins" - name: "System" x-displayName: "System" definitions: Port: type: "object" description: "An open port on a container" required: [PrivatePort, Type] properties: IP: type: "string" format: "ip-address" description: "Host IP address that the container's port is mapped to" PrivatePort: type: "integer" format: "uint16" x-nullable: false description: "Port on the container" PublicPort: type: "integer" format: "uint16" description: "Port exposed on the host" Type: type: "string" x-nullable: false enum: ["tcp", "udp", "sctp"] example: PrivatePort: 8080 PublicPort: 80 Type: "tcp" MountPoint: type: "object" description: "A mount point inside a container" properties: Type: type: "string" Name: type: "string" Source: type: "string" Destination: type: "string" Driver: type: "string" Mode: type: "string" RW: type: "boolean" Propagation: type: "string" DeviceMapping: type: "object" description: "A device mapping between the host and container" properties: PathOnHost: type: "string" PathInContainer: type: "string" CgroupPermissions: type: "string" example: PathOnHost: "/dev/deviceName" PathInContainer: "/dev/deviceName" CgroupPermissions: "mrw" DeviceRequest: type: "object" description: "A request for devices to be sent to device drivers" properties: Driver: type: "string" example: "nvidia" Count: type: "integer" example: -1 DeviceIDs: type: "array" items: type: "string" example: - "0" - "1" - "GPU-fef8089b-4820-abfc-e83e-94318197576e" Capabilities: description: | A list of capabilities; an OR list of AND lists of capabilities. type: "array" items: type: "array" items: type: "string" example: # gpu AND nvidia AND compute - ["gpu", "nvidia", "compute"] Options: description: | Driver-specific options, specified as a key/value pairs. These options are passed directly to the driver. type: "object" additionalProperties: type: "string" ThrottleDevice: type: "object" properties: Path: description: "Device path" type: "string" Rate: description: "Rate" type: "integer" format: "int64" minimum: 0 Mount: type: "object" properties: Target: description: "Container path." type: "string" Source: description: "Mount source (e.g. a volume name, a host path)." type: "string" Type: description: | The mount type. Available types: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. type: "string" enum: - "bind" - "volume" - "tmpfs" - "npipe" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" Consistency: description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." type: "string" BindOptions: description: "Optional configuration for the `bind` type." type: "object" properties: Propagation: description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." type: "string" enum: - "private" - "rprivate" - "shared" - "rshared" - "slave" - "rslave" NonRecursive: description: "Disable recursive bind mount." type: "boolean" default: false VolumeOptions: description: "Optional configuration for the `volume` type." 
type: "object" properties: NoCopy: description: "Populate volume with data from the target." type: "boolean" default: false Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" DriverConfig: description: "Map of driver specific options" type: "object" properties: Name: description: "Name of the driver to use to create the volume." type: "string" Options: description: "key/value map of driver specific options." type: "object" additionalProperties: type: "string" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" properties: SizeBytes: description: "The size for the tmpfs mount in bytes." type: "integer" format: "int64" Mode: description: "The permission mode for the tmpfs mount in an integer." type: "integer" RestartPolicy: description: | The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. type: "object" properties: Name: type: "string" description: | - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" - "no" - "always" - "unless-stopped" - "on-failure" MaximumRetryCount: type: "integer" description: | If `on-failure` is used, the number of times to retry before giving up. Resources: description: "A container's resources (cgroups config, ulimits, etc)" type: "object" properties: # Applicable to all platforms CpuShares: description: | An integer value representing this container's relative CPU weight versus other containers. type: "integer" Memory: description: "Memory limit in bytes." type: "integer" format: "int64" default: 0 # Applicable to UNIX platforms CgroupParent: description: | Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. type: "string" BlkioWeight: description: "Block IO weight (relative weight)." type: "integer" minimum: 0 maximum: 1000 BlkioWeightDevice: description: | Block IO weight (relative device weight) in the form: ``` [{"Path": "device_path", "Weight": weight}] ``` type: "array" items: type: "object" properties: Path: type: "string" Weight: type: "integer" minimum: 0 BlkioDeviceReadBps: description: | Limit read rate (bytes per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteBps: description: | Limit write rate (bytes per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceReadIOps: description: | Limit read rate (IO per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteIOps: description: | Limit write rate (IO per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" CpuPeriod: description: "The length of a CPU period in microseconds." 
type: "integer" format: "int64" CpuQuota: description: | Microseconds of CPU time that the container can get in a CPU period. type: "integer" format: "int64" CpuRealtimePeriod: description: | The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpuRealtimeRuntime: description: | The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpusetCpus: description: | CPUs in which to allow execution (e.g., `0-3`, `0,1`). type: "string" example: "0-3" CpusetMems: description: | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. type: "string" Devices: description: "A list of devices to add to the container." type: "array" items: $ref: "#/definitions/DeviceMapping" DeviceCgroupRules: description: "a list of cgroup rules to apply to the container" type: "array" items: type: "string" example: "c 13:* rwm" DeviceRequests: description: | A list of requests for devices to be sent to device drivers. type: "array" items: $ref: "#/definitions/DeviceRequest" KernelMemory: description: | Kernel memory limit in bytes. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "integer" format: "int64" example: 209715200 KernelMemoryTCP: description: "Hard limit for kernel TCP buffer memory (in bytes)." type: "integer" format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" format: "int64" MemorySwap: description: | Total memory limit (memory + swap). Set as `-1` to enable unlimited swap. type: "integer" format: "int64" MemorySwappiness: description: | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. type: "integer" format: "int64" minimum: 0 maximum: 100 NanoCpus: description: "CPU quota in units of 10<sup>-9</sup> CPUs." type: "integer" format: "int64" OomKillDisable: description: "Disable OOM Killer for the container." type: "boolean" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true PidsLimit: description: | Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. type: "integer" format: "int64" x-nullable: true Ulimits: description: | A list of resource limits to set in the container. For example: ``` {"Name": "nofile", "Soft": 1024, "Hard": 2048} ``` type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" # Applicable to Windows CpuCount: description: | The number of usable CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" CpuPercent: description: | The usable percentage of the available CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. 
type: "integer" format: "int64" IOMaximumIOps: description: "Maximum IOps for the container system drive (Windows only)" type: "integer" format: "int64" IOMaximumBandwidth: description: | Maximum IO in bytes per second for the container system drive (Windows only). type: "integer" format: "int64" Limit: description: | An object describing a limit on resources which can be requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 Pids: description: | Limits the maximum number of PIDs in the container. Set `0` for unlimited. type: "integer" format: "int64" default: 0 example: 100 ResourceObject: description: | An object describing the resources which can be advertised by a node and requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 GenericResources: $ref: "#/definitions/GenericResources" GenericResources: description: | User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`). type: "array" items: type: "object" properties: NamedResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "string" DiscreteResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "integer" format: "int64" example: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" HealthConfig: description: "A test to perform to check that the container is healthy." type: "object" properties: Test: description: | The test to perform. Possible values are: - `[]` inherit healthcheck from image or parent image - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell type: "array" items: type: "string" Interval: description: | The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Retries: description: | The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. type: "integer" StartPeriod: description: | Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Health: description: | Health stores information about the container's healthcheck results. 
type: "object" properties: Status: description: | Status is one of `none`, `starting`, `healthy` or `unhealthy` - "none" Indicates there is no healthcheck - "starting" Starting indicates that the container is not yet ready - "healthy" Healthy indicates that the container is running correctly - "unhealthy" Unhealthy indicates that the container has a problem type: "string" enum: - "none" - "starting" - "healthy" - "unhealthy" example: "healthy" FailingStreak: description: "FailingStreak is the number of consecutive failures" type: "integer" example: 0 Log: type: "array" description: | Log contains the last few results (oldest first) items: x-nullable: true $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" properties: Start: description: | Date and time at which this check started in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "date-time" example: "2020-01-04T10:44:24.496525531Z" End: description: | Date and time at which this check ended in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2020-01-04T10:45:21.364524523Z" ExitCode: description: | ExitCode meanings: - `0` healthy - `1` unhealthy - `2` reserved (considered unhealthy) - other values: error running probe type: "integer" example: 0 Output: description: "Output from last check" type: "string" HostConfig: description: "Container configuration that depends on the host we are running on" allOf: - $ref: "#/definitions/Resources" - type: "object" properties: # Applicable to all platforms Binds: type: "array" description: | A list of volume bindings for this container. Each volume binding is a string in one of these forms: - `host-src:container-dest[:options]` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest[:options]` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. `options` is an optional, comma-delimited list of: - `nocopy` disables automatic copying of data from the container path to the volume. The `nocopy` flag only applies to named volumes. - `[ro|rw]` mounts a volume read-only or read-write, respectively. If omitted or set to `rw`, volumes are mounted read-write. - `[z|Z]` applies SELinux labels to allow or deny multiple containers to read and write to the same volume. - `z`: a _shared_ content label is applied to the content. This label indicates that multiple containers can share the volume content, for both reading and writing. - `Z`: a _private unshared_ label is applied to the content. This label indicates that only the current container can use a private volume. Labeling systems such as SELinux require proper labels to be placed on volume content that is mounted into a container. Without a label, the security system can prevent a container's processes from using the content. By default, the labels set by the host operating system are not modified. - `[[r]shared|[r]slave|[r]private]` specifies mount [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). This only applies to bind-mounted volumes, not internal volumes or named volumes. 
Mount propagation requires the source mount point (the location where the source directory is mounted in the host operating system) to have the correct propagation properties. For shared volumes, the source mount point must be set to `shared`. For slave volumes, the mount must be set to either `shared` or `slave`. items: type: "string" ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: type: "string" enum: - "json-file" - "syslog" - "journald" - "gelf" - "fluentd" - "awslogs" - "splunk" - "etwlogs" - "none" Config: type: "object" additionalProperties: type: "string" NetworkMode: type: "string" description: | Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name to which this container should connect to. PortBindings: $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: type: "boolean" description: | Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set. VolumeDriver: type: "string" description: "Driver that this container uses to mount volumes." VolumesFrom: type: "array" description: | A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`. items: type: "string" Mounts: description: | Specification for mounts to be added to the container. type: "array" items: $ref: "#/definitions/Mount" # Applicable to UNIX platforms CapAdd: type: "array" description: | A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'. items: type: "string" CapDrop: type: "array" description: | A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'. items: type: "string" CgroupnsMode: type: "string" enum: - "private" - "host" description: | cgroup namespace mode for the container. Possible values are: - `"private"`: the container runs in its own private cgroup namespace - `"host"`: use the host system's cgroup namespace If not specified, the daemon default is used, which can either be `"private"` or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." items: type: "string" DnsOptions: type: "array" description: "A list of DNS options." items: type: "string" DnsSearch: type: "array" description: "A list of DNS search domains." items: type: "string" ExtraHosts: type: "array" description: | A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. items: type: "string" GroupAdd: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" IpcMode: type: "string" description: | IPC sharing mode for the container. Possible values are: - `"none"`: own private IPC namespace, with /dev/shm not mounted - `"private"`: own private IPC namespace - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - `"container:<name|id>"`: join another (shareable) container's IPC namespace - `"host"`: use the host system's IPC namespace If not specified, daemon default is used, which can either be `"private"` or `"shareable"`, depending on daemon version and configuration. 
Cgroup: type: "string" description: "Cgroup to use for the container." Links: type: "array" description: | A list of links for the container in the form `container_name:alias`. items: type: "string" OomScoreAdj: type: "integer" description: | An integer value containing the score given to the container in order to tune OOM killer preferences. example: 500 PidMode: type: "string" description: | Set the PID (Process) Namespace mode for the container. It can be either: - `"container:<name|id>"`: joins another container's PID namespace - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" description: "Gives the container full access to the host." PublishAllPorts: type: "boolean" description: | Allocates an ephemeral host port for all of a container's exposed ports. Ports are de-allocated when the container stops and allocated when the container starts. The allocated port might be changed when restarting the container. The port is selected from the ephemeral port range that depends on the kernel. For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" description: "A list of string values to customize labels for MLS systems, such as SELinux." items: type: "string" StorageOpt: type: "object" description: | Storage driver options for this container, in the form `{"size": "120G"}`. additionalProperties: type: "string" Tmpfs: type: "object" description: | A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: ``` { "/run": "rw,noexec,nosuid,size=65536k" } ``` additionalProperties: type: "string" UTSMode: type: "string" description: "UTS namespace to use for the container." UsernsMode: type: "string" description: | Sets the usernamespace mode for the container when usernamespace remapping option is enabled. ShmSize: type: "integer" description: | Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. minimum: 0 Sysctls: type: "object" description: | A list of kernel parameters (sysctls) to set in the container. For example: ``` {"net.ipv4.ip_forward": "1"} ``` additionalProperties: type: "string" Runtime: type: "string" description: "Runtime to use with this container." # Applicable to Windows ConsoleSize: type: "array" description: | Initial console size, as an `[height, width]` array. (Windows only) minItems: 2 maxItems: 2 items: type: "integer" minimum: 0 Isolation: type: "string" description: | Isolation technology of the container. (Windows only) enum: - "default" - "process" - "hyperv" MaskedPaths: type: "array" description: | The list of paths to be masked inside the container (this overrides the default set of paths). items: type: "string" ReadonlyPaths: type: "array" description: | The list of paths to be set as read-only inside the container (this overrides the default set of paths). items: type: "string" ContainerConfig: description: "Configuration for a container that is portable between hosts" type: "object" properties: Hostname: description: "The hostname to use for the container, as a valid RFC 1123 hostname." type: "string" Domainname: description: "The domain name to use for the container." type: "string" User: description: "The user that commands are run as inside the container." type: "string" AttachStdin: description: "Whether to attach to `stdin`." 
type: "boolean" default: false AttachStdout: description: "Whether to attach to `stdout`." type: "boolean" default: true AttachStderr: description: "Whether to attach to `stderr`." type: "boolean" default: true ExposedPorts: description: | An object mapping ports to an empty object in the form: `{"<port>/<tcp|udp|sctp>": {}}` type: "object" additionalProperties: type: "object" enum: - {} default: {} Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. type: "boolean" default: false OpenStdin: description: "Open `stdin`" type: "boolean" default: false StdinOnce: description: "Close `stdin` after one attached client disconnects" type: "boolean" default: false Env: description: | A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. type: "array" items: type: "string" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" Image: description: | The name of the image to use when creating the container/ type: "string" Volumes: description: | An object mapping mount point paths inside the container to empty objects. type: "object" additionalProperties: type: "object" enum: - {} default: {} WorkingDir: description: "The working directory for commands to run in." type: "string" Entrypoint: description: | The entry point for the container as a string or an array of strings. If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). type: "array" items: type: "string" NetworkDisabled: description: "Disable networking for the container." type: "boolean" MacAddress: description: "MAC address of the container." type: "string" OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" items: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" default: "SIGTERM" StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" items: type: "string" NetworkingConfig: description: | NetworkingConfig represents the container's networking configuration for each of its interfaces. It is used for the networking configs specified in the `docker create` and `docker network connect` commands. type: "object" properties: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from # /definitions/EndpointSettings, because containers/create currently # does not support attaching to multiple networks, so the example request # would be confusing if it showed that multiple networks can be contained # in the EndpointsConfig. 
# TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: description: Name of the network's bridge (for example, `docker0`). type: "string" example: "docker0" SandboxID: description: SandboxID uniquely represents a container's network stack. type: "string" example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. type: "boolean" example: false LinkLocalIPv6Address: description: IPv6 unicast address using the link-local prefix. type: "string" example: "fe80::42:acff:fe11:1" LinkLocalIPv6PrefixLen: description: Prefix length of the IPv6 unicast address. type: "integer" example: "64" Ports: $ref: "#/definitions/PortMap" SandboxKey: description: SandboxKey identifies the sandbox type: "string" example: "/var/run/docker/netns/8ab54b426c38" # TODO is SecondaryIPAddresses actually used? SecondaryIPAddresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO is SecondaryIPv6Addresses actually used? SecondaryIPv6Addresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO properties below are part of DefaultNetworkSettings, which is # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 EndpointID: description: | EndpointID uniquely represents a service endpoint in a Sandbox. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.1" GlobalIPv6Address: description: | Global IPv6 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. 
This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 64 IPAddress: description: | IPv4 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address for this network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8:2::100" MacAddress: description: | MAC address for the container on the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "02:42:ac:11:00:04" Networks: description: | Information about all networks that the container is connected to. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Address: description: Address represents an IPv4 or IPv6 IP address. type: "object" properties: Addr: description: IP address. type: "string" PrefixLen: description: Mask length of the IP address. type: "integer" PortMap: description: | PortMap describes the mapping of container ports to host ports, using the container's port-number and protocol as key in the format `<port>/<protocol>`, for example, `80/udp`. If a container's port is mapped for multiple protocols, separate entries are added to the mapping table. type: "object" additionalProperties: type: "array" x-nullable: true items: $ref: "#/definitions/PortBinding" example: "443/tcp": - HostIp: "127.0.0.1" HostPort: "4443" "80/tcp": - HostIp: "0.0.0.0" HostPort: "80" - HostIp: "0.0.0.0" HostPort: "8080" "80/udp": - HostIp: "0.0.0.0" HostPort: "80" "53/udp": - HostIp: "0.0.0.0" HostPort: "53" "2377/tcp": null PortBinding: description: | PortBinding represents a binding between a host IP address and a host port. type: "object" properties: HostIp: description: "Host IP address that the container's port is mapped to." type: "string" example: "127.0.0.1" HostPort: description: "Host port number that the container's port is mapped to." type: "string" example: "4443" GraphDriverData: description: "Information about a container's graph driver." 
type: "object" required: [Name, Data] properties: Name: type: "string" x-nullable: false Data: type: "object" x-nullable: false additionalProperties: type: "string" Image: type: "object" required: - Id - Parent - Comment - Created - Container - DockerVersion - Author - Architecture - Os - Size - VirtualSize - GraphDriver - RootFS properties: Id: type: "string" x-nullable: false RepoTags: type: "array" items: type: "string" RepoDigests: type: "array" items: type: "string" Parent: type: "string" x-nullable: false Comment: type: "string" x-nullable: false Created: type: "string" x-nullable: false Container: type: "string" x-nullable: false ContainerConfig: $ref: "#/definitions/ContainerConfig" DockerVersion: type: "string" x-nullable: false Author: type: "string" x-nullable: false Config: $ref: "#/definitions/ContainerConfig" Architecture: type: "string" x-nullable: false Os: type: "string" x-nullable: false OsVersion: type: "string" Size: type: "integer" format: "int64" x-nullable: false VirtualSize: type: "integer" format: "int64" x-nullable: false GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: type: "object" required: [Type] properties: Type: type: "string" x-nullable: false Layers: type: "array" items: type: "string" BaseLayer: type: "string" Metadata: type: "object" properties: LastTagTime: type: "string" format: "dateTime" ImageSummary: type: "object" required: - Id - ParentId - RepoTags - RepoDigests - Created - Size - SharedSize - VirtualSize - Labels - Containers properties: Id: type: "string" x-nullable: false ParentId: type: "string" x-nullable: false RepoTags: type: "array" x-nullable: false items: type: "string" RepoDigests: type: "array" x-nullable: false items: type: "string" Created: type: "integer" x-nullable: false Size: type: "integer" x-nullable: false SharedSize: type: "integer" x-nullable: false VirtualSize: type: "integer" x-nullable: false Labels: type: "object" x-nullable: false additionalProperties: type: "string" Containers: x-nullable: false type: "integer" AuthConfig: type: "object" properties: username: type: "string" password: type: "string" email: type: "string" serveraddress: type: "string" example: username: "hannibal" password: "xxxx" serveraddress: "https://index.docker.io/v1/" ProcessConfig: type: "object" properties: privileged: type: "boolean" user: type: "string" tty: type: "boolean" entrypoint: type: "string" arguments: type: "array" items: type: "string" Volume: type: "object" required: [Name, Driver, Mountpoint, Labels, Scope, Options] properties: Name: type: "string" description: "Name of the volume." x-nullable: false Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false Mountpoint: type: "string" description: "Mount path of the volume on the host." x-nullable: false CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." Status: type: "object" description: | Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature. additionalProperties: type: "object" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" Scope: type: "string" description: | The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. 
default: "local" x-nullable: false enum: ["local", "global"] Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" UsageData: type: "object" x-nullable: true required: [Size, RefCount] description: | Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. properties: Size: type: "integer" default: -1 description: | Amount of disk space used by the volume (in bytes). This information is only available for volumes created with the `"local"` volume driver. For volumes created with other volume drivers, this field is set to `-1` ("not available") x-nullable: false RefCount: type: "integer" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. x-nullable: false example: Name: "tardis" Driver: "custom" Mountpoint: "/var/lib/docker/volumes/tardis" Status: hello: "world" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" CreatedAt: "2016-06-07T20:31:11.853781916Z" Network: type: "object" properties: Name: type: "string" Id: type: "string" Created: type: "string" format: "dateTime" Scope: type: "string" Driver: type: "string" EnableIPv6: type: "boolean" IPAM: $ref: "#/definitions/IPAM" Internal: type: "boolean" Attachable: type: "boolean" Ingress: type: "boolean" Containers: type: "object" additionalProperties: $ref: "#/definitions/NetworkContainer" Options: type: "object" additionalProperties: type: "string" Labels: type: "object" additionalProperties: type: "string" example: Name: "net01" Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" Created: "2016-10-19T04:33:30.360899459Z" Scope: "local" Driver: "bridge" EnableIPv6: false IPAM: Driver: "default" Config: - Subnet: "172.19.0.0/16" Gateway: "172.19.0.1" Options: foo: "bar" Internal: false Attachable: false Ingress: false Containers: 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: Name: "test" EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: "02:42:ac:13:00:02" IPv4Address: "172.19.0.2/16" IPv6Address: "" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" IPAM: type: "object" properties: Driver: description: "Name of the IPAM driver to use." type: "string" default: "default" Config: description: | List of IPAM configuration options, specified as a map: ``` {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} ``` type: "array" items: type: "object" additionalProperties: type: "string" Options: description: "Driver-specific options, specified as a map." 
type: "object" additionalProperties: type: "string" NetworkContainer: type: "object" properties: Name: type: "string" EndpointID: type: "string" MacAddress: type: "string" IPv4Address: type: "string" IPv6Address: type: "string" BuildInfo: type: "object" properties: id: type: "string" stream: type: "string" error: type: "string" errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" aux: $ref: "#/definitions/ImageID" BuildCache: type: "object" properties: ID: type: "string" Parent: type: "string" Type: type: "string" Description: type: "string" InUse: type: "boolean" Shared: type: "boolean" Size: description: | Amount of disk space used by the build cache (in bytes). type: "integer" CreatedAt: description: | Date and time at which the build cache was created in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" LastUsedAt: description: | Date and time at which the build cache was last used in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" x-nullable: true example: "2017-08-09T07:09:37.632105588Z" UsageCount: type: "integer" ImageID: type: "object" description: "Image ID or Digest" properties: ID: type: "string" example: ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" CreateImageInfo: type: "object" properties: id: type: "string" error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" PushImageInfo: type: "object" properties: error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" ErrorDetail: type: "object" properties: code: type: "integer" message: type: "string" ProgressDetail: type: "object" properties: current: type: "integer" total: type: "integer" ErrorResponse: description: "Represents an error." type: "object" required: ["message"] properties: message: description: "The error message." type: "string" x-nullable: false example: message: "Something went wrong." IdResponse: description: "Response to an API call that returns just an Id" type: "object" required: ["Id"] properties: Id: description: "The id of the newly created object." type: "string" x-nullable: false EndpointSettings: description: "Configuration for a network endpoint." type: "object" properties: # Configurations IPAMConfig: $ref: "#/definitions/EndpointIPAMConfig" Links: type: "array" items: type: "string" example: - "container_1" - "container_2" Aliases: type: "array" items: type: "string" example: - "server_x" - "server_y" # Operational data NetworkID: description: | Unique ID of the network. type: "string" example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" EndpointID: description: | Unique ID for the service endpoint in a Sandbox. type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for this network. type: "string" example: "172.17.0.1" IPAddress: description: | IPv4 address. type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address. type: "string" example: "2001:db8:2::100" GlobalIPv6Address: description: | Global IPv6 address. 
type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. type: "integer" format: "int64" example: 64 MacAddress: description: | MAC address for the endpoint on this network. type: "string" example: "02:42:ac:11:00:04" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" x-nullable: true additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" EndpointIPAMConfig: description: | EndpointIPAMConfig represents an endpoint's IPAM configuration. type: "object" x-nullable: true properties: IPv4Address: type: "string" example: "172.20.30.33" IPv6Address: type: "string" example: "2001:db8:abcd::3033" LinkLocalIPs: type: "array" items: type: "string" example: - "169.254.34.68" - "fe80::3468" PluginMount: type: "object" x-nullable: false required: [Name, Description, Settable, Source, Destination, Type, Options] properties: Name: type: "string" x-nullable: false example: "some-mount" Description: type: "string" x-nullable: false example: "This is a mount that's used by the plugin." Settable: type: "array" items: type: "string" Source: type: "string" example: "/var/lib/docker/plugins/" Destination: type: "string" x-nullable: false example: "/mnt/state" Type: type: "string" x-nullable: false example: "bind" Options: type: "array" items: type: "string" example: - "rbind" - "rw" PluginDevice: type: "object" required: [Name, Description, Settable, Path] x-nullable: false properties: Name: type: "string" x-nullable: false Description: type: "string" x-nullable: false Settable: type: "array" items: type: "string" Path: type: "string" example: "/dev/fuse" PluginEnv: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" Description: x-nullable: false type: "string" Settable: type: "array" items: type: "string" Value: type: "string" PluginInterfaceType: type: "object" x-nullable: false required: [Prefix, Capability, Version] properties: Prefix: type: "string" x-nullable: false Capability: type: "string" x-nullable: false Version: type: "string" x-nullable: false PluginPrivilegeItem: description: | Describes a permission the user has to accept upon installing the plugin. type: "object" properties: Name: type: "string" example: "network" Description: type: "string" Value: type: "array" items: type: "string" example: - "host" Plugin: description: "A plugin for the Engine API" type: "object" required: [Settings, Enabled, Config, Name] properties: Id: type: "string" example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" Name: type: "string" x-nullable: false example: "tiborvass/sample-volume-plugin" Enabled: description: True if the plugin is running. False if the plugin is not running, only installed. type: "boolean" x-nullable: false example: true Settings: description: "Settings that can be modified by users." 
type: "object" x-nullable: false required: [Args, Devices, Env, Mounts] properties: Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: type: "string" example: - "DEBUG=0" Args: type: "array" items: type: "string" Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PluginReference: description: "plugin remote reference used to push/pull the plugin" type: "string" x-nullable: false example: "localhost:5000/tiborvass/sample-volume-plugin:latest" Config: description: "The config of a plugin." type: "object" x-nullable: false required: - Description - Documentation - Interface - Entrypoint - WorkDir - Network - Linux - PidHost - PropagatedMount - IpcHost - Mounts - Env - Args properties: DockerVersion: description: "Docker Version used to create the plugin" type: "string" x-nullable: false example: "17.06.0-ce" Description: type: "string" x-nullable: false example: "A sample volume plugin for Docker" Documentation: type: "string" x-nullable: false example: "https://docs.docker.com/engine/extend/plugins/" Interface: description: "The interface between Docker and the plugin" x-nullable: false type: "object" required: [Types, Socket] properties: Types: type: "array" items: $ref: "#/definitions/PluginInterfaceType" example: - "docker.volumedriver/1.0" Socket: type: "string" x-nullable: false example: "plugins.sock" ProtocolScheme: type: "string" example: "some.protocol/v1.0" description: "Protocol to use for clients connecting to the plugin." enum: - "" - "moby.plugins.http/v1" Entrypoint: type: "array" items: type: "string" example: - "/usr/bin/sample-volume-plugin" - "/data" WorkDir: type: "string" x-nullable: false example: "/bin/" User: type: "object" x-nullable: false properties: UID: type: "integer" format: "uint32" example: 1000 GID: type: "integer" format: "uint32" example: 1000 Network: type: "object" x-nullable: false required: [Type] properties: Type: x-nullable: false type: "string" example: "host" Linux: type: "object" x-nullable: false required: [Capabilities, AllowAllDevices, Devices] properties: Capabilities: type: "array" items: type: "string" example: - "CAP_SYS_ADMIN" - "CAP_SYSLOG" AllowAllDevices: type: "boolean" x-nullable: false example: false Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PropagatedMount: type: "string" x-nullable: false example: "/mnt/volumes" IpcHost: type: "boolean" x-nullable: false example: false PidHost: type: "boolean" x-nullable: false example: false Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: $ref: "#/definitions/PluginEnv" example: - Name: "DEBUG" Description: "If set, prints debug messages" Settable: null Value: "0" Args: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" example: "args" Description: x-nullable: false type: "string" example: "command line arguments" Settable: type: "array" items: type: "string" Value: type: "array" items: type: "string" rootfs: type: "object" properties: type: type: "string" example: "layers" diff_ids: type: "array" items: type: "string" example: - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ObjectVersion: description: | The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. 
The client must send the version number along with the modified specification when updating these objects. This approach ensures safe concurrency and determinism in that the change on the object may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not unintentionally overwrite each other. type: "object" properties: Index: type: "integer" format: "uint64" example: 373531 NodeSpec: type: "object" properties: Name: description: "Name for the node." type: "string" example: "my-node" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Role: description: "Role of the node." type: "string" enum: - "worker" - "manager" example: "manager" Availability: description: "Availability of the node." type: "string" enum: - "active" - "pause" - "drain" example: "active" example: Availability: "active" Name: "node-name" Role: "manager" Labels: foo: "bar" Node: type: "object" properties: ID: type: "string" example: "24ifsmvkjbyhk" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the node was added to the swarm in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the node was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/NodeSpec" Description: $ref: "#/definitions/NodeDescription" Status: $ref: "#/definitions/NodeStatus" ManagerStatus: $ref: "#/definitions/ManagerStatus" NodeDescription: description: | NodeDescription encapsulates the properties of the Node as reported by the agent. type: "object" properties: Hostname: type: "string" example: "bf3067039e47" Platform: $ref: "#/definitions/Platform" Resources: $ref: "#/definitions/ResourceObject" Engine: $ref: "#/definitions/EngineDescription" TLSInfo: $ref: "#/definitions/TLSInfo" Platform: description: | Platform represents the platform (Arch/OS). type: "object" properties: Architecture: description: | Architecture represents the hardware architecture (for example, `x86_64`). type: "string" example: "x86_64" OS: description: | OS represents the Operating System (for example, `linux` or `windows`). type: "string" example: "linux" EngineDescription: description: "EngineDescription provides information about an engine." 
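# A minimal sketch of the optimistic-locking flow described for ObjectVersion:
# read the node, then send the observed Version.Index back as the `version`
# query parameter when updating its spec. The node ID and version index reuse
# the examples above; the labels and availability value are assumptions.
#
#   curl --unix-socket /var/run/docker.sock http://localhost/nodes/24ifsmvkjbyhk
#   # note Version.Index in the response, e.g. 373531, then:
#   curl --unix-socket /var/run/docker.sock \
#     -H "Content-Type: application/json" \
#     -d '{"Role": "worker", "Availability": "drain", "Labels": {"type": "production"}}' \
#     "http://localhost/nodes/24ifsmvkjbyhk/update?version=373531"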
type: "object" properties: EngineVersion: type: "string" example: "17.06.0" Labels: type: "object" additionalProperties: type: "string" example: foo: "bar" Plugins: type: "array" items: type: "object" properties: Type: type: "string" Name: type: "string" example: - Type: "Log" Name: "awslogs" - Type: "Log" Name: "fluentd" - Type: "Log" Name: "gcplogs" - Type: "Log" Name: "gelf" - Type: "Log" Name: "journald" - Type: "Log" Name: "json-file" - Type: "Log" Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" Name: "syslog" - Type: "Network" Name: "bridge" - Type: "Network" Name: "host" - Type: "Network" Name: "ipvlan" - Type: "Network" Name: "macvlan" - Type: "Network" Name: "null" - Type: "Network" Name: "overlay" - Type: "Volume" Name: "local" - Type: "Volume" Name: "localhost:5000/vieux/sshfs:latest" - Type: "Volume" Name: "vieux/sshfs:latest" TLSInfo: description: | Information about the issuer of leaf TLS certificates and the trusted root CA certificate. type: "object" properties: TrustRoot: description: | The root CA certificate(s) that are used to validate leaf TLS certificates. type: "string" CertIssuerSubject: description: The base64-url-safe-encoded raw subject bytes of the issuer. type: "string" CertIssuerPublicKey: description: | The base64-url-safe-encoded raw public key bytes of the issuer. type: "string" example: TrustRoot: | -----BEGIN CERTIFICATE----- MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H -----END CERTIFICATE----- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" NodeStatus: description: | NodeStatus represents the status of a node. It provides the current status of the node, as seen by the manager. type: "object" properties: State: $ref: "#/definitions/NodeState" Message: type: "string" example: "" Addr: description: "IP address of the node." type: "string" example: "172.17.0.2" NodeState: description: "NodeState represents the state of a node." type: "string" enum: - "unknown" - "down" - "ready" - "disconnected" example: "ready" ManagerStatus: description: | ManagerStatus represents the status of a manager. It provides the current status of a node's manager component, if the node is a manager. x-nullable: true type: "object" properties: Leader: type: "boolean" default: false example: true Reachability: $ref: "#/definitions/Reachability" Addr: description: | The IP address and port at which the manager is reachable. type: "string" example: "10.0.0.46:2377" Reachability: description: "Reachability represents the reachability of a node." type: "string" enum: - "unknown" - "unreachable" - "reachable" example: "reachable" SwarmSpec: description: "User modifiable swarm configuration." type: "object" properties: Name: description: "Name of the swarm." type: "string" example: "default" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: com.example.corp.type: "production" com.example.corp.department: "engineering" Orchestration: description: "Orchestration configuration." type: "object" x-nullable: true properties: TaskHistoryRetentionLimit: description: | The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks. type: "integer" format: "int64" example: 10 Raft: description: "Raft configuration." type: "object" properties: SnapshotInterval: description: "The number of log entries between snapshots." type: "integer" format: "uint64" example: 10000 KeepOldSnapshots: description: | The number of snapshots to keep beyond the current snapshot. type: "integer" format: "uint64" LogEntriesForSlowFollowers: description: | The number of log entries to keep around to sync up slow followers after a snapshot is created. type: "integer" format: "uint64" example: 500 ElectionTick: description: | The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 3 HeartbeatTick: description: | The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 1 Dispatcher: description: "Dispatcher configuration." type: "object" x-nullable: true properties: HeartbeatPeriod: description: | The delay for an agent to send a heartbeat to the dispatcher. type: "integer" format: "int64" example: 5000000000 CAConfig: description: "CA configuration." type: "object" x-nullable: true properties: NodeCertExpiry: description: "The duration node certificates are issued for." type: "integer" format: "int64" example: 7776000000000000 ExternalCAs: description: | Configuration for forwarding signing requests to an external certificate authority. type: "array" items: type: "object" properties: Protocol: description: | Protocol for communication with the external CA (currently only `cfssl` is supported). type: "string" enum: - "cfssl" default: "cfssl" URL: description: | URL where certificate signing requests should be sent. type: "string" Options: description: | An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver. type: "object" additionalProperties: type: "string" CACert: description: | The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided). type: "string" SigningCACert: description: | The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format. type: "string" SigningCAKey: description: | The desired signing CA key for all swarm node TLS leaf certificates, in PEM format. type: "string" ForceRotate: description: | An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey` format: "uint64" type: "integer" EncryptionConfig: description: "Parameters related to encryption-at-rest." type: "object" properties: AutoLockManagers: description: | If set, generate a key and use it to lock data stored on the managers. 
type: "boolean" example: false TaskDefaults: description: "Defaults for creating tasks in this cluster." type: "object" properties: LogDriver: description: | The log driver to use for tasks created in the orchestrator if unspecified by a service. Updating this value only affects new tasks. Existing tasks continue to use their previously configured log driver until recreated. type: "object" properties: Name: description: | The log driver to use as a default for new tasks. type: "string" example: "json-file" Options: description: | Driver-specific options for the selectd log driver, specified as key/value pairs. type: "object" additionalProperties: type: "string" example: "max-file": "10" "max-size": "100m" # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but # without `JoinTokens`. ClusterInfo: description: | ClusterInfo represents information about the swarm as is returned by the "/info" endpoint. Join-tokens are not included. x-nullable: true type: "object" properties: ID: description: "The ID of the swarm." type: "string" example: "abajmipo7b4xz5ip2nrla6b11" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the swarm was initialised in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the swarm was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/SwarmSpec" TLSInfo: $ref: "#/definitions/TLSInfo" RootRotationInProgress: description: | Whether there is currently a root CA rotation in progress for the swarm type: "boolean" example: false DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. If no port is set or is set to 0, the default port (4789) is used. type: "integer" format: "uint32" default: 4789 example: 4789 DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" format: "CIDR" example: ["10.10.0.0/16", "20.20.0.0/16"] SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. type: "integer" format: "uint32" maximum: 29 default: 24 example: 24 JoinTokens: description: | JoinTokens contains the tokens workers and managers need to join the swarm. type: "object" properties: Worker: description: | The token workers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" Manager: description: | The token managers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" Swarm: type: "object" allOf: - $ref: "#/definitions/ClusterInfo" - type: "object" properties: JoinTokens: $ref: "#/definitions/JoinTokens" TaskSpec: description: "User modifiable task configuration." type: "object" properties: PluginSpec: type: "object" description: | Plugin spec for the service. *(Experimental release only.)* <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. 
properties: Name: description: "The name or 'alias' to use for the plugin." type: "string" Remote: description: "The plugin image reference to use." type: "string" Disabled: description: "Disable the plugin once scheduled." type: "boolean" PluginPrivilege: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" ContainerSpec: type: "object" description: | Container spec for the service. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. properties: Image: description: "The image name to use for the container" type: "string" Labels: description: "User-defined key/value data." type: "object" additionalProperties: type: "string" Command: description: "The command to be run in the image." type: "array" items: type: "string" Args: description: "Arguments to the command." type: "array" items: type: "string" Hostname: description: | The hostname to use for the container, as a valid [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. type: "string" Env: description: | A list of environment variables in the form `VAR=value`. type: "array" items: type: "string" Dir: description: "The working directory for commands to run in." type: "string" User: description: "The user inside the container." type: "string" Groups: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" Privileges: type: "object" description: "Security options for the container" properties: CredentialSpec: type: "object" description: "CredentialSpec for managed service account (Windows only)" properties: Config: type: "string" example: "0bt9dmxjvjiqermk6xrop3ekq" description: | Load credential spec from a Swarm Config with the given ID. The specified config must also be present in the Configs field with the Runtime property set. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. File: type: "string" example: "spec.json" description: | Load credential spec from this file. The file is read by the daemon, and must be present in the `CredentialSpecs` subdirectory in the docker data directory, which defaults to `C:\ProgramData\Docker\` on Windows. For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. Registry: type: "string" description: | Load credential spec from this value in the Windows registry. The specified registry value must be located in: `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" properties: Disable: type: "boolean" description: "Disable SELinux" User: type: "string" description: "SELinux user label" Role: type: "string" description: "SELinux role label" Type: type: "string" description: "SELinux type label" Level: type: "string" description: "SELinux level label" TTY: description: "Whether a pseudo-TTY should be allocated." 
type: "boolean" OpenStdin: description: "Open `stdin`" type: "boolean" ReadOnly: description: "Mount the container's root filesystem as read only." type: "boolean" Mounts: description: | Specification for mounts to be added to containers created as part of the service. type: "array" items: $ref: "#/definitions/Mount" StopSignal: description: "Signal to stop the container." type: "string" StopGracePeriod: description: | Amount of time to wait for the container to terminate before forcefully killing it. type: "integer" format: "int64" HealthCheck: $ref: "#/definitions/HealthConfig" Hosts: type: "array" description: | A list of hostname/IP mappings to add to the container's `hosts` file. The format of extra hosts is specified in the [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) man page: IP_address canonical_hostname [aliases...] items: type: "string" DNSConfig: description: | Specification for DNS related configurations in resolver configuration file (`resolv.conf`). type: "object" properties: Nameservers: description: "The IP addresses of the name servers." type: "array" items: type: "string" Search: description: "A search list for host-name lookup." type: "array" items: type: "string" Options: description: | A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.). type: "array" items: type: "string" Secrets: description: | Secrets contains references to zero or more secrets that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" SecretID: description: | SecretID represents the ID of the specific secret that we're referencing. type: "string" SecretName: description: | SecretName is the name of the secret that this references, but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID. type: "string" Configs: description: | Configs contains references to zero or more configs that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" Runtime: description: | Runtime represents a target that is not mounted into the container but is used by the task <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually > exclusive type: "object" ConfigID: description: | ConfigID represents the ID of the specific config that we're referencing. type: "string" ConfigName: description: | ConfigName is the name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID. 
type: "string" Isolation: type: "string" description: | Isolation technology of the containers running the service. (Windows only) enum: - "default" - "process" - "hyperv" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true Sysctls: description: | Set kernel namedspaced parameters (sysctls) in the container. The Sysctls option on services accepts the same sysctls as the are supported on containers. Note that while the same sysctls are supported, no guarantees or checks are made about their suitability for a clustered environment, and it's up to the user to determine whether a given sysctl will work properly in a Service. type: "object" additionalProperties: type: "string" # This option is not used by Windows containers CapabilityAdd: type: "array" description: | A list of kernel capabilities to add to the default set for the container. items: type: "string" example: - "CAP_NET_RAW" - "CAP_SYS_ADMIN" - "CAP_SYS_CHROOT" - "CAP_SYSLOG" CapabilityDrop: type: "array" description: | A list of kernel capabilities to drop from the default set for the container. items: type: "string" example: - "CAP_NET_RAW" Ulimits: description: | A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay networks. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. type: "object" properties: ContainerID: description: "ID of the container represented by this task" type: "string" Resources: description: | Resource requirements which apply to each individual container created as part of the service. type: "object" properties: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" Reservation: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: description: | Specification for the restart policy which applies to containers created as part of this service. type: "object" properties: Condition: description: "Condition for restart." type: "string" enum: - "none" - "on-failure" - "any" Delay: description: "Delay between restart attempts." type: "integer" format: "int64" MaxAttempts: description: | Maximum attempts to restart a given container before giving up (default value is 0, which is ignored). type: "integer" format: "int64" default: 0 Window: description: | Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded). type: "integer" format: "int64" default: 0 Placement: type: "object" properties: Constraints: description: | An array of constraint expressions to limit the set of nodes where a task can be scheduled. Constraint expressions can either use a _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find nodes that satisfy every expression (AND match). 
Constraints can match node or Docker Engine labels as follows: node attribute | matches | example ---------------------|--------------------------------|----------------------------------------------- `node.id` | Node ID | `node.id==2ivku8v2gvtg4` `node.hostname` | Node hostname | `node.hostname!=node-2` `node.role` | Node role (`manager`/`worker`) | `node.role==manager` `node.platform.os` | Node operating system | `node.platform.os==windows` `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` `node.labels` | User-defined node labels | `node.labels.security==high` `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` `engine.labels` apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add `node.labels` for operational purposes by using the [`node update endpoint`](#operation/NodeUpdate). type: "array" items: type: "string" example: - "node.hostname!=node3.corp.example.com" - "node.role!=manager" - "node.labels.type==production" - "node.platform.os==linux" - "node.platform.arch==x86_64" Preferences: description: | Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence. type: "array" items: type: "object" properties: Spread: type: "object" properties: SpreadDescriptor: description: | label descriptor, such as `engine.labels.az`. type: "string" example: - Spread: SpreadDescriptor: "node.labels.datacenter" - Spread: SpreadDescriptor: "node.labels.rack" MaxReplicas: description: | Maximum number of replicas for per node (default value is 0, which is unlimited) type: "integer" format: "int64" default: 0 Platforms: description: | Platforms stores all the platforms that the service's image can run on. This field is used in the platform filter for scheduling. If empty, then the platform filter is off, meaning there are no scheduling restrictions. type: "array" items: $ref: "#/definitions/Platform" ForceUpdate: description: | A counter that triggers an update even if no relevant parameters have been changed. type: "integer" Runtime: description: | Runtime is the type of runtime specified for the task executor. type: "string" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" LogDriver: description: | Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified. type: "object" properties: Name: type: "string" Options: type: "object" additionalProperties: type: "string" TaskState: type: "string" enum: - "new" - "allocated" - "pending" - "assigned" - "accepted" - "preparing" - "ready" - "starting" - "running" - "complete" - "shutdown" - "failed" - "rejected" - "remove" - "orphaned" Task: type: "object" properties: ID: description: "The ID of the task." type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Name: description: "Name of the task." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Spec: $ref: "#/definitions/TaskSpec" ServiceID: description: "The ID of the service this task is part of." type: "string" Slot: type: "integer" NodeID: description: "The ID of the node that this task is on." 
type: "string" AssignedGenericResources: $ref: "#/definitions/GenericResources" Status: type: "object" properties: Timestamp: type: "string" format: "dateTime" State: $ref: "#/definitions/TaskState" Message: type: "string" Err: type: "string" ContainerStatus: type: "object" properties: ContainerID: type: "string" PID: type: "integer" ExitCode: type: "integer" DesiredState: $ref: "#/definitions/TaskState" JobIteration: description: | If the Service this Task belongs to is a job-mode service, contains the JobIteration of the Service this Task was created for. Absent if the Task was created for a Replicated or Global Service. $ref: "#/definitions/ObjectVersion" example: ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" AssignedGenericResources: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" ServiceSpec: description: "User modifiable configuration for a service." properties: Name: description: "Name of the service." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" TaskTemplate: $ref: "#/definitions/TaskSpec" Mode: description: "Scheduling mode for the service." type: "object" properties: Replicated: type: "object" properties: Replicas: type: "integer" format: "int64" Global: type: "object" ReplicatedJob: description: | The mode used for services with a finite number of tasks that run to a completed state. type: "object" properties: MaxConcurrent: description: | The maximum number of replicas to run simultaneously. type: "integer" format: "int64" default: 1 TotalCompletions: description: | The total number of replicas desired to reach the Completed state. If unset, will default to the value of `MaxConcurrent` type: "integer" format: "int64" GlobalJob: description: | The mode used for services which run a task to the completed state on each valid node. type: "object" UpdateConfig: description: "Specification for the update strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: "Amount of time between updates, in nanoseconds." type: "integer" format: "int64" FailureAction: description: | Action to take if an updated task fails to run, or stops running during the update. 
type: "string" enum: - "continue" - "pause" - "rollback" Monitor: description: | Amount of time to monitor each updated task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" RollbackConfig: description: "Specification for the rollback strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: | Amount of time between rollback iterations, in nanoseconds. type: "integer" format: "int64" FailureAction: description: | Action to take if an rolled back task fails to run, or stops running during the rollback. type: "string" enum: - "continue" - "pause" Monitor: description: | Amount of time to monitor each rolled back task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" EndpointSpec: $ref: "#/definitions/EndpointSpec" EndpointPortConfig: type: "object" properties: Name: type: "string" Protocol: type: "string" enum: - "tcp" - "udp" - "sctp" TargetPort: description: "The port inside the container." type: "integer" PublishedPort: description: "The port on the swarm hosts." type: "integer" PublishMode: description: | The mode in which port is published. <p><br /></p> - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publish the port directly on the swarm node where that service is running. type: "string" enum: - "ingress" - "host" default: "ingress" example: "ingress" EndpointSpec: description: "Properties that can be configured to access and load balance a service." type: "object" properties: Mode: description: | The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" - "dnsrr" default: "vip" Ports: description: | List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used. 
type: "array" items: $ref: "#/definitions/EndpointPortConfig" Service: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ServiceSpec" Endpoint: type: "object" properties: Spec: $ref: "#/definitions/EndpointSpec" Ports: type: "array" items: $ref: "#/definitions/EndpointPortConfig" VirtualIPs: type: "array" items: type: "object" properties: NetworkID: type: "string" Addr: type: "string" UpdateStatus: description: "The status of a service update." type: "object" properties: State: type: "string" enum: - "updating" - "paused" - "completed" StartedAt: type: "string" format: "dateTime" CompletedAt: type: "string" format: "dateTime" Message: type: "string" ServiceStatus: description: | The status of the service's tasks. Provided only when requested as part of a ServiceList operation. type: "object" properties: RunningTasks: description: | The number of tasks for the service currently in the Running state. type: "integer" format: "uint64" example: 7 DesiredTasks: description: | The number of tasks for the service desired to be running. For replicated services, this is the replica count from the service spec. For global services, this is computed by taking count of all tasks for the service with a Desired State other than Shutdown. type: "integer" format: "uint64" example: 10 CompletedTasks: description: | The number of tasks for a job that are in the Completed state. This field must be cross-referenced with the service type, as the value of 0 may mean the service is not in a job mode, or it may mean the job-mode service has no tasks yet Completed. type: "integer" format: "uint64" JobStatus: description: | The status of the service when it is in one of ReplicatedJob or GlobalJob modes. Absent on Replicated and Global mode services. The JobIteration is an ObjectVersion, but unlike the Service's version, does not need to be sent with an update request. type: "object" properties: JobIteration: description: | JobIteration is a value increased each time a Job is executed, successfully or otherwise. "Executed", in this case, means the job as a whole has been started, not that an individual Task has been launched. A job is "Executed" when its ServiceSpec is updated. JobIteration can be used to disambiguate Tasks belonging to different executions of a job. Though JobIteration will increase with each subsequent execution, it may not necessarily increase by 1, and so JobIteration should not be used to $ref: "#/definitions/ObjectVersion" LastExecution: description: | The last time, as observed by the server, that this job was started. 
type: "string" format: "dateTime" example: ID: "9mnpnzenvg8p8tdbtq4wvbkcz" Version: Index: 19 CreatedAt: "2016-06-07T21:05:51.880065305Z" UpdatedAt: "2016-06-07T21:07:29.962229872Z" Spec: Name: "hopeful_cori" TaskTemplate: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Endpoint: Spec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 VirtualIPs: - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.2/16" - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.3/16" ImageDeleteResponseItem: type: "object" properties: Untagged: description: "The image ID of an image that was untagged" type: "string" Deleted: description: "The image ID of an image that was deleted" type: "string" ServiceUpdateResponse: type: "object" properties: Warnings: description: "Optional warning messages" type: "array" items: type: "string" example: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: type: "object" properties: Id: description: "The ID of this container" type: "string" x-go-name: "ID" Names: description: "The names that this container has been given" type: "array" items: type: "string" Image: description: "The name of the image used when creating this container" type: "string" ImageID: description: "The ID of the image that this container was created from" type: "string" Command: description: "Command to run when starting the container" type: "string" Created: description: "When the container was created" type: "integer" format: "int64" Ports: description: "The ports exposed by this container" type: "array" items: $ref: "#/definitions/Port" SizeRw: description: "The size of files that have been created or changed by this container" type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container" type: "integer" format: "int64" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" State: description: "The state of this container (e.g. `Exited`)" type: "string" Status: description: "Additional human-readable status of this container (e.g. `Exit 0`)" type: "string" HostConfig: type: "object" properties: NetworkMode: type: "string" NetworkSettings: description: "A summary of the container's network settings" type: "object" properties: Networks: type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Mounts: type: "array" items: $ref: "#/definitions/Mount" Driver: description: "Driver represents a driver (network, logging, secrets)." type: "object" required: [Name] properties: Name: description: "Name of the driver." type: "string" x-nullable: false example: "some-driver" Options: description: "Key/value map of driver-specific options." type: "object" x-nullable: false additionalProperties: type: "string" example: OptionA: "value for driver-specific option A" OptionB: "value for driver-specific option B" SecretSpec: type: "object" properties: Name: description: "User-defined name of the secret." 
type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) data to store as secret. This field is only used to _create_ a secret, and is not returned by other endpoints. type: "string" example: "" Driver: description: | Name of the secrets driver used to fetch the secret's value from an external secret store. $ref: "#/definitions/Driver" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Secret: type: "object" properties: ID: type: "string" example: "blt1owaxmitz71s9v5zh81zun" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" UpdatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" Spec: $ref: "#/definitions/SecretSpec" ConfigSpec: type: "object" properties: Name: description: "User-defined name of the config." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) config data. type: "string" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Config: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ConfigSpec" ContainerState: description: | ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" properties: Status: description: | String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead". type: "string" enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] example: "running" Running: description: | Whether this container is running. Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. Use the `Status` field instead to determine if a container's state is "running". type: "boolean" example: true Paused: description: "Whether this container is paused." type: "boolean" example: false Restarting: description: "Whether this container is restarting." type: "boolean" example: false OOMKilled: description: | Whether this container has been killed because it ran out of memory. type: "boolean" example: false Dead: type: "boolean" example: false Pid: description: "The process ID of this container" type: "integer" example: 1234 ExitCode: description: "The last exit code of this container" type: "integer" example: 0 Error: type: "string" StartedAt: description: "The time when this container was last started." 
type: "string" example: "2020-01-06T09:06:59.461876391Z" FinishedAt: description: "The time when this container last exited." type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: x-nullable: true $ref: "#/definitions/Health" SystemVersion: type: "object" description: | Response of Engine API: GET "/version" properties: Platform: type: "object" required: [Name] properties: Name: type: "string" Components: type: "array" description: | Information about system components items: type: "object" x-go-name: ComponentVersion required: [Name, Version] properties: Name: description: | Name of the component type: "string" example: "Engine" Version: description: | Version of the component type: "string" x-nullable: false example: "19.03.12" Details: description: | Key/value pairs of strings with additional information about the component. These values are intended for informational purposes only, and their content is not defined, and not part of the API specification. These messages can be printed by the client as information to the user. type: "object" x-nullable: true Version: description: "The version of the daemon" type: "string" example: "19.03.12" ApiVersion: description: | The default (and highest) API version that is supported by the daemon type: "string" example: "1.40" MinAPIVersion: description: | The minimum API version that is supported by the daemon type: "string" example: "1.12" GitCommit: description: | The Git commit of the source code that was used to build the daemon type: "string" example: "48a66213fe" GoVersion: description: | The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" example: "go1.13.14" Os: description: | The operating system that the daemon is running on ("linux" or "windows") type: "string" example: "linux" Arch: description: | The architecture that the daemon is running on type: "string" example: "amd64" KernelVersion: description: | The kernel version (`uname -r`) that the daemon is running on. This field is omitted when empty. type: "string" example: "4.19.76-linuxkit" Experimental: description: | Indicates if the daemon is started with experimental features enabled. This field is omitted when empty / false. type: "boolean" example: true BuildTime: description: | The date and time that the daemon was compiled. type: "string" example: "2020-06-22T15:49:27.000000000+00:00" SystemInfo: type: "object" properties: ID: description: | Unique identifier of the daemon. <p><br /></p> > **Note**: The format of the ID itself is not part of the API, and > should not be considered stable. type: "string" example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" Containers: description: "Total number of containers on the host." type: "integer" example: 14 ContainersRunning: description: | Number of containers with status `"running"`. type: "integer" example: 3 ContainersPaused: description: | Number of containers with status `"paused"`. type: "integer" example: 1 ContainersStopped: description: | Number of containers with status `"stopped"`. type: "integer" example: 10 Images: description: | Total number of images on the host. Both _tagged_ and _untagged_ (dangling) images are counted. type: "integer" example: 508 Driver: description: "Name of the storage driver in use." type: "string" example: "overlay2" DriverStatus: description: | Information specific to the storage driver, provided as "label" / "value" pairs. 
This information is provided by the storage driver, and formatted in a way consistent with the output of `docker info` on the command line. <p><br /></p> > **Note**: The information returned in this field, including the > formatting of values and labels, should not be considered stable, > and may change without notice. type: "array" items: type: "array" items: type: "string" example: - ["Backing Filesystem", "extfs"] - ["Supports d_type", "true"] - ["Native Overlay Diff", "true"] DockerRootDir: description: | Root directory of persistent Docker state. Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` on Windows. type: "string" example: "/var/lib/docker" Plugins: $ref: "#/definitions/PluginsInfo" MemoryLimit: description: "Indicates if the host has memory limit support enabled." type: "boolean" example: true SwapLimit: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true KernelMemory: description: | Indicates if the host has kernel memory limit support enabled. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "boolean" example: true CpuCfsPeriod: description: | Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host. type: "boolean" example: true CpuCfsQuota: description: | Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host. type: "boolean" example: true CPUShares: description: | Indicates if CPU Shares limiting is supported by the host. type: "boolean" example: true CPUSet: description: | Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) type: "boolean" example: true PidsLimit: description: "Indicates if the host kernel has PID limit support enabled." type: "boolean" example: true OomKillDisable: description: "Indicates if OOM killer disable is supported on the host." type: "boolean" IPv4Forwarding: description: "Indicates IPv4 forwarding is enabled." type: "boolean" example: true BridgeNfIptables: description: "Indicates if `bridge-nf-call-iptables` is available on the host." type: "boolean" example: true BridgeNfIp6tables: description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." type: "boolean" example: true Debug: description: | Indicates if the daemon is running in debug-mode / with debug-level logging enabled. type: "boolean" example: true NFd: description: | The total number of file Descriptors in use by the daemon process. This information is only returned if debug-mode is enabled. type: "integer" example: 64 NGoroutines: description: | The number of goroutines that currently exist. This information is only returned if debug-mode is enabled. type: "integer" example: 174 SystemTime: description: | Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" example: "2017-08-08T20:28:29.06202363Z" LoggingDriver: description: | The logging driver to use as a default for new containers. type: "string" CgroupDriver: description: | The driver to use for managing cgroups. type: "string" enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" CgroupVersion: description: | The version of the cgroup. type: "string" enum: ["1", "2"] default: "1" example: "1" NEventsListener: description: "Number of event listeners subscribed." 
type: "integer" example: 30 KernelVersion: description: | Kernel version of the host. On Linux, this information obtained from `uname`. On Windows this information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. type: "string" example: "4.9.38-moby" OperatingSystem: description: | Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" or "Windows Server 2016 Datacenter" type: "string" example: "Alpine Linux v3.5" OSVersion: description: | Version of the host's operating system <p><br /></p> > **Note**: The information returned in this field, including its > very existence, and the formatting of values, should not be considered > stable, and may change without notice. type: "string" example: "16.04" OSType: description: | Generic type of the operating system of the host, as returned by the Go runtime (`GOOS`). Currently returned values are "linux" and "windows". A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "linux" Architecture: description: | Hardware architecture of the host, as returned by the Go runtime (`GOARCH`). A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "x86_64" NCPU: description: | The number of logical CPUs usable by the daemon. The number of available CPUs is checked by querying the operating system when the daemon starts. Changes to operating system CPU allocation after the daemon is started are not reflected. type: "integer" example: 4 MemTotal: description: | Total amount of physical memory available on the host, in bytes. type: "integer" format: "int64" example: 2095882240 IndexServerAddress: description: | Address / URL of the index server that is used for image search, and as a default for user authentication for Docker Hub and Docker Cloud. default: "https://index.docker.io/v1/" type: "string" example: "https://index.docker.io/v1/" RegistryConfig: $ref: "#/definitions/RegistryServiceConfig" GenericResources: $ref: "#/definitions/GenericResources" HttpProxy: description: | HTTP-proxy configured for the daemon. This value is obtained from the [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "http://xxxxx:[email protected]:8080" HttpsProxy: description: | HTTPS-proxy configured for the daemon. This value is obtained from the [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "https://xxxxx:[email protected]:4443" NoProxy: description: | Comma-separated list of domain extensions for which no proxy should be used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Containers do not automatically inherit this configuration. 
type: "string" example: "*.local, 169.254/16" Name: description: "Hostname of the host." type: "string" example: "node5.corp.example.com" Labels: description: | User-defined labels (key/value metadata) as set on the daemon. <p><br /></p> > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, > set through the daemon configuration, and _node_ labels, set from a > manager node in the Swarm. Node labels are not included in this > field. Node labels can be retrieved using the `/nodes/(id)` endpoint > on a manager node in the Swarm. type: "array" items: type: "string" example: ["storage=ssd", "production"] ExperimentalBuild: description: | Indicates if experimental features are enabled on the daemon. type: "boolean" example: true ServerVersion: description: | Version string of the daemon. > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) > returns the Swarm version instead of the daemon version, for example > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: description: | URL of the distributed storage backend. The storage backend is used for multihost networking (to store network and endpoint information) and by the node discovery mechanism. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "consul://consul.corp.example.com:8600/some/path" ClusterAdvertise: description: | The network endpoint that the Engine advertises for the purpose of node discovery. ClusterAdvertise is a `host:port` combination on which the daemon is reachable by other hosts. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "node5.corp.example.com:8000" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) runtimes configured on the daemon. Keys hold the "name" used to reference the runtime. The Docker daemon relies on an OCI compliant runtime (invoked via the `containerd` daemon) as its interface to the Linux kernel namespaces, cgroups, and SELinux. The default runtime is `runc`, and automatically configured. Additional runtimes can be configured by the user and will be listed here. type: "object" additionalProperties: $ref: "#/definitions/Runtime" default: runc: path: "runc" example: runc: path: "runc" runc-master: path: "/go/bin/runc" custom: path: "/usr/local/bin/my-oci-runtime" runtimeArgs: ["--debug", "--systemd-cgroup=false"] DefaultRuntime: description: | Name of the default OCI runtime that is used when starting containers. The default can be overridden per-container at create time. type: "string" default: "runc" example: "runc" Swarm: $ref: "#/definitions/SwarmInfo" LiveRestoreEnabled: description: | Indicates if live restore is enabled. If enabled, containers are kept running when the daemon is shutdown or upon daemon start if running containers are detected. type: "boolean" default: false example: false Isolation: description: | Represents the isolation technology to use as a default for containers. The supported values are platform-specific. 
If no isolation value is specified on daemon start, on Windows client, the default is `hyperv`, and on Windows server, the default is `process`. This option is currently not used on other platforms. default: "default" type: "string" enum: - "default" - "hyperv" - "process" InitBinary: description: | Name and, optional, path of the `docker-init` binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "docker-init" ContainerdCommit: $ref: "#/definitions/Commit" RuncCommit: $ref: "#/definitions/Commit" InitCommit: $ref: "#/definitions/Commit" SecurityOptions: description: | List of security features that are enabled on the daemon, such as apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value pairs. type: "array" items: type: "string" example: - "name=apparmor" - "name=seccomp,profile=default" - "name=selinux" - "name=userns" - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. If a commercial license has been applied to the daemon, information such as number of nodes, and expiration are included. type: "string" example: "Community Engine" DefaultAddressPools: description: | List of custom default address pools for local networks, which can be specified in the daemon.json file or dockerd option. Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 10.10.[0-255].0/24 address pools. type: "array" items: type: "object" properties: Base: description: "The network address in CIDR format" type: "string" example: "10.10.0.0/16" Size: description: "The network pool size" type: "integer" example: "24" Warnings: description: | List of warnings / informational messages about missing features, or issues related to the daemon configuration. These messages can be printed by the client as information to the user. type: "array" items: type: "string" example: - "WARNING: No memory limit support" - "WARNING: bridge-nf-call-iptables is disabled" - "WARNING: bridge-nf-call-ip6tables is disabled" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: description: | Available plugins per type. <p><br /></p> > **Note**: Only unmanaged (V1) plugins are included in this list. > V1 plugins are "lazily" loaded, and are not returned in this list > if there is no resource using the plugin. type: "object" properties: Volume: description: "Names of available volume-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["local"] Network: description: "Names of available network-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] Authorization: description: "Names of available authorization plugins." type: "array" items: type: "string" example: ["img-authz-plugin", "hbm"] Log: description: "Names of available logging-drivers, and logging-driver plugins." type: "array" items: type: "string" example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] RegistryServiceConfig: description: | RegistryServiceConfig stores daemon registry services configuration. 
type: "object" x-nullable: true properties: AllowNondistributableArtifactsCIDRs: description: | List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior, and enables the daemon to push nondistributable artifacts to all registries whose resolved IP address is within the subnet described by the CIDR syntax. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior for the specified registries. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. By default, local registries (`127.0.0.0/8`) are configured as insecure. All other registries are secure. Communicating with an insecure registry is not possible if the daemon assumes that registry is secure. This configuration override this behavior, insecure communication with registries whose resolved IP address is within the subnet described by the CIDR syntax. Registries can also be marked insecure by hostname. Those registries are listed under `IndexConfigs` and have their `Secure` field set to `false`. > **Warning**: Using this option can be useful when running a local > registry, but introduces security vulnerabilities. This option > should therefore ONLY be used for testing purposes. For increased > security, users should add their CA to their system's list of trusted > CAs instead of enabling this option. 
type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] IndexConfigs: type: "object" additionalProperties: $ref: "#/definitions/IndexInfo" example: "127.0.0.1:5000": "Name": "127.0.0.1:5000" "Mirrors": [] "Secure": false "Official": false "[2001:db8:a0b:12f0::1]:80": "Name": "[2001:db8:a0b:12f0::1]:80" "Mirrors": [] "Secure": false "Official": false "docker.io": Name: "docker.io" Mirrors: ["https://hub-mirror.corp.example.com:5000/"] Secure: true Official: true "registry.internal.corp.example.com:3000": Name: "registry.internal.corp.example.com:3000" Mirrors: [] Secure: false Official: false Mirrors: description: | List of registry URLs that act as a mirror for the official (`docker.io`) registry. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://[2001:db8:a0b:12f0::1]/" IndexInfo: description: IndexInfo contains information about a registry. type: "object" x-nullable: true properties: Name: description: | Name of the registry, such as "docker.io". type: "string" example: "docker.io" Mirrors: description: | List of mirrors, expressed as URIs. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://registry-2.docker.io/" - "https://registry-3.docker.io/" Secure: description: | Indicates if the registry is part of the list of insecure registries. If `false`, the registry is insecure. Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. > **Warning**: Insecure registries can be useful when running a local > registry. However, because its use creates security vulnerabilities > it should ONLY be enabled for testing purposes. For increased > security, users should add their CA to their system's list of > trusted CAs instead of enabling this option. type: "boolean" example: true Official: description: | Indicates whether this is an official registry (i.e., Docker Hub / docker.io) type: "boolean" example: true Runtime: description: | Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) runtime. The runtime is invoked by the daemon via the `containerd` daemon. OCI runtimes act as an interface to the Linux kernel namespaces, cgroups, and SELinux. type: "object" properties: path: description: | Name and, optional, path, of the OCI executable binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "/usr/local/bin/my-oci-runtime" runtimeArgs: description: | List of command-line arguments to pass to the runtime when invoked. type: "array" x-nullable: true items: type: "string" example: ["--debug", "--systemd-cgroup=false"] Commit: description: | Commit holds the Git-commit (SHA1) that a binary was built from, as reported in the version-string of external tools, such as `containerd`, or `runC`. type: "object" properties: ID: description: "Actual commit ID of external tool." type: "string" example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" Expected: description: | Commit ID of external tool expected by dockerd as set at build time. type: "string" example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" SwarmInfo: description: | Represents generic information about swarm. type: "object" properties: NodeID: description: "Unique identifier of for this node in the swarm." 
type: "string" default: "" example: "k67qz4598weg5unwwffg6z1m1" NodeAddr: description: | IP address at which this node can be reached by other nodes in the swarm. type: "string" default: "" example: "10.0.0.46" LocalNodeState: $ref: "#/definitions/LocalNodeState" ControlAvailable: type: "boolean" default: false example: true Error: type: "string" default: "" RemoteManagers: description: | List of ID's and addresses of other managers in the swarm. type: "array" default: null x-nullable: true items: $ref: "#/definitions/PeerNode" example: - NodeID: "71izy0goik036k48jg985xnds" Addr: "10.0.0.158:2377" - NodeID: "79y6h1o4gv8n120drcprv5nmc" Addr: "10.0.0.159:2377" - NodeID: "k67qz4598weg5unwwffg6z1m1" Addr: "10.0.0.46:2377" Nodes: description: "Total number of nodes in the swarm." type: "integer" x-nullable: true example: 4 Managers: description: "Total number of managers in the swarm." type: "integer" x-nullable: true example: 3 Cluster: $ref: "#/definitions/ClusterInfo" LocalNodeState: description: "Current local status of this node." type: "string" default: "" enum: - "" - "inactive" - "pending" - "active" - "error" - "locked" example: "active" PeerNode: description: "Represents a peer-node in the swarm" properties: NodeID: description: "Unique identifier of for this node in the swarm." type: "string" Addr: description: | IP address and ports at which this node can be reached. type: "string" NetworkAttachmentConfig: description: | Specifies how a service should be attached to a particular network. type: "object" properties: Target: description: | The target network for attachment. Must be a network name or ID. type: "string" Aliases: description: | Discoverable alternate names for the service on this network. type: "array" items: type: "string" DriverOpts: description: | Driver attachment options for the network target. type: "object" additionalProperties: type: "string" paths: /containers/json: get: summary: "List containers" description: | Returns a list of containers. For details on the format, see the [inspect endpoint](#operation/ContainerInspect). Note that it uses a different, smaller representation of a container than inspecting a single container. For example, the list of linked containers is not propagated . operationId: "ContainerList" produces: - "application/json" parameters: - name: "all" in: "query" description: | Return all containers. By default, only running containers are shown. type: "boolean" default: false - name: "limit" in: "query" description: | Return this number of most recently created containers, including non-running ones. type: "integer" - name: "size" in: "query" description: | Return the size of container as fields `SizeRw` and `SizeRootFs`. type: "boolean" default: false - name: "filters" in: "query" description: | Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
Available filters: - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) - `before`=(`<container id>` or `<container name>`) - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `exited=<int>` containers with exit code of `<int>` - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - `id=<ID>` a container's ID - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - `is-task=`(`true`|`false`) - `label=key` or `label="key=value"` of a container label - `name=<name>` a container's name - `network`=(`<network id>` or `<network name>`) - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `since`=(`<container id>` or `<container name>`) - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - `volume`=(`<volume name>` or `<mount point destination>`) type: "string" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/ContainerSummary" examples: application/json: - Id: "8dfafdbc3a40" Names: - "/boring_feynman" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 1" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: - PrivatePort: 2222 PublicPort: 3333 Type: "tcp" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:02" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" - Id: "9cd87474be90" Names: - "/coolName" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 222222" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" Gateway: "172.17.0.1" IPAddress: "172.17.0.8" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:08" Mounts: [] - Id: "3176a2479c92" Names: - "/sleepy_dog" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 3333333333333333" Created: 1367854154 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" Gateway: "172.17.0.1" IPAddress: "172.17.0.6" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:06" Mounts: [] - Id: "4cb07b47f9fb" Names: - "/running_cat" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 444444444444444444444444444444444" Created: 1367854152 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" 
NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" Gateway: "172.17.0.1" IPAddress: "172.17.0.5" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:05" Mounts: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/create: post: summary: "Create a container" operationId: "ContainerCreate" consumes: - "application/json" - "application/octet-stream" produces: - "application/json" parameters: - name: "name" in: "query" description: | Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. type: "string" pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" schema: allOf: - $ref: "#/definitions/ContainerConfig" - type: "object" properties: HostConfig: $ref: "#/definitions/HostConfig" NetworkingConfig: $ref: "#/definitions/NetworkingConfig" example: Hostname: "" Domainname: "" User: "" AttachStdin: false AttachStdout: true AttachStderr: true Tty: false OpenStdin: false StdinOnce: false Env: - "FOO=bar" - "BAZ=quux" Cmd: - "date" Entrypoint: "" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" Volumes: /volumes/data: {} WorkingDir: "" NetworkDisabled: false MacAddress: "12:34:56:78:9a:bc" ExposedPorts: 22/tcp: {} StopSignal: "SIGTERM" StopTimeout: 10 HostConfig: Binds: - "/tmp:/tmp" Links: - "redis3:redis" Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpuQuota: 50000 CpusetCpus: "0,1" CpusetMems: "0,1" MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 300 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceWriteIOps: - {} DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" MemorySwappiness: 60 OomKillDisable: false OomScoreAdj: 500 PidMode: "" PidsLimit: 0 PortBindings: 22/tcp: - HostPort: "11022" PublishAllPorts: false Privileged: false ReadonlyRootfs: false Dns: - "8.8.8.8" DnsOptions: - "" DnsSearch: - "" VolumesFrom: - "parent" - "other:ro" CapAdd: - "NET_ADMIN" CapDrop: - "MKNOD" GroupAdd: - "newgroup" RestartPolicy: Name: "" MaximumRetryCount: 0 AutoRemove: true NetworkMode: "bridge" Devices: [] Ulimits: - {} LogConfig: Type: "json-file" Config: {} SecurityOpt: [] StorageOpt: {} CgroupParent: "" VolumeDriver: "" ShmSize: 67108864 NetworkingConfig: EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" required: true responses: 201: description: "Container created successfully" schema: type: "object" title: "ContainerCreateResponse" description: "OK response to ContainerCreate operation" required: [Id, Warnings] properties: Id: description: "The ID of the created container" type: "string" x-nullable: false Warnings: description: "Warnings encountered when creating the container" type: "array" x-nullable: false items: type: "string" 
examples: application/json: Id: "e90e34656806" Warnings: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/{id}/json: get: summary: "Inspect a container" description: "Return low-level information about a container." operationId: "ContainerInspect" produces: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "ContainerInspectResponse" properties: Id: description: "The ID of the container" type: "string" Created: description: "The time the container was created" type: "string" Path: description: "The path to the command being run" type: "string" Args: description: "The arguments to the command being run" type: "array" items: type: "string" State: x-nullable: true $ref: "#/definitions/ContainerState" Image: description: "The container's image ID" type: "string" ResolvConfPath: type: "string" HostnamePath: type: "string" HostsPath: type: "string" LogPath: type: "string" Name: type: "string" RestartCount: type: "integer" Driver: type: "string" Platform: type: "string" MountLabel: type: "string" ProcessLabel: type: "string" AppArmorProfile: type: "string" ExecIDs: description: "IDs of exec instances that are running in the container." type: "array" items: type: "string" x-nullable: true HostConfig: $ref: "#/definitions/HostConfig" GraphDriver: $ref: "#/definitions/GraphDriverData" SizeRw: description: | The size of files that have been created or changed by this container. type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container." 
type: "integer" format: "int64" Mounts: type: "array" items: $ref: "#/definitions/MountPoint" Config: $ref: "#/definitions/ContainerConfig" NetworkSettings: $ref: "#/definitions/NetworkSettings" examples: application/json: AppArmorProfile: "" Args: - "-c" - "exit 9" Config: AttachStderr: true AttachStdin: false AttachStdout: true Cmd: - "/bin/sh" - "-c" - "exit 9" Domainname: "" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Healthcheck: Test: ["CMD-SHELL", "exit 0"] Hostname: "ba033ac44011" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" MacAddress: "" NetworkDisabled: false OpenStdin: false StdinOnce: false Tty: false User: "" Volumes: /volumes/data: {} WorkingDir: "" StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" Driver: "devicemapper" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 0 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteIOps: - {} ContainerIDFile: "" CpusetCpus: "" CpusetMems: "" CpuPercent: 80 CpuShares: 0 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 Devices: [] DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" IpcMode: "" LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" PidMode: "" PortBindings: {} Privileged: false ReadonlyRootfs: false PublishAllPorts: false RestartPolicy: MaximumRetryCount: 2 Name: "on-failure" LogConfig: Type: "json-file" Sysctls: net.ipv4.ip_forward: "1" Ulimits: - {} VolumeDriver: "" ShmSize: 67108864 HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" MountLabel: "" Name: "/boring_euclid" NetworkSettings: Bridge: "" SandboxID: "" HairpinMode: false LinkLocalIPv6Address: "" LinkLocalIPv6PrefixLen: 0 SandboxKey: "" EndpointID: "" Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 IPAddress: "" IPPrefixLen: 0 IPv6Gateway: "" MacAddress: "" Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Path: "/bin/sh" ProcessLabel: "" ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" RestartCount: 1 State: Error: "" ExitCode: 9 FinishedAt: "2015-01-06T15:47:32.080254511Z" Health: Status: "healthy" FailingStreak: 0 Log: - Start: "2019-12-22T10:59:05.6385933Z" End: "2019-12-22T10:59:05.8078452Z" ExitCode: 0 Output: "" OOMKilled: 
false Dead: false Paused: false Pid: 0 Restarting: false Running: true StartedAt: "2015-01-06T15:47:32.072697474Z" Status: "running" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "size" in: "query" type: "boolean" default: false description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" tags: ["Container"] /containers/{id}/top: get: summary: "List processes running inside a container" description: | On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows. operationId: "ContainerTop" responses: 200: description: "no error" schema: type: "object" title: "ContainerTopResponse" description: "OK response to ContainerTop operation" properties: Titles: description: "The ps column titles" type: "array" items: type: "string" Processes: description: | Each process running in the container, where each is process is an array of values corresponding to the titles. type: "array" items: type: "array" items: type: "string" examples: application/json: Titles: - "UID" - "PID" - "PPID" - "C" - "STIME" - "TTY" - "TIME" - "CMD" Processes: - - "root" - "13642" - "882" - "0" - "17:03" - "pts/0" - "00:00:00" - "/bin/bash" - - "root" - "13735" - "13642" - "0" - "17:06" - "pts/0" - "00:00:00" - "sleep 10" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "ps_args" in: "query" description: "The arguments to pass to `ps`. For example, `aux`" type: "string" default: "-ef" tags: ["Container"] /containers/{id}/logs: get: summary: "Get container logs" description: | Get `stdout` and `stderr` logs from a container. Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. operationId: "ContainerLogs" responses: 200: description: | logs returned as a stream in response body. For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not set Content-Type. schema: type: "string" format: "binary" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "until" in: "query" description: "Only return logs before this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Container"] /containers/{id}/changes: get: summary: "Get changes on a container’s filesystem" description: | Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: - `0`: Modified - `1`: Added - `2`: Deleted operationId: "ContainerChanges" produces: ["application/json"] responses: 200: description: "The list of changes" schema: type: "array" items: type: "object" x-go-name: "ContainerChangeResponseItem" title: "ContainerChangeResponseItem" description: "change item in response to ContainerChanges operation" required: [Path, Kind] properties: Path: description: "Path to file that has changed" type: "string" x-nullable: false Kind: description: "Kind of change" type: "integer" format: "uint8" enum: [0, 1, 2] x-nullable: false examples: application/json: - Path: "/dev" Kind: 0 - Path: "/dev/kmsg" Kind: 1 - Path: "/test" Kind: 1 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/export: get: summary: "Export a container" description: "Export the contents of a container as a tarball." operationId: "ContainerExport" produces: - "application/octet-stream" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/stats: get: summary: "Get container stats based on resource usage" description: | This endpoint returns a live stream of a container’s resource usage statistics. The `precpu_stats` is the CPU statistic of the *previous* read, and is used to calculate the CPU usage percentage. It is not an exact copy of the `cpu_stats` field. If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. On a cgroup v2 host, the following fields are not set * `blkio_stats`: all fields other than `io_service_bytes_recursive` * `cpu_stats`: `cpu_usage.percpu_usage` * `memory_stats`: `max_usage` and `failcnt` Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: * used_memory = `memory_stats.usage - memory_stats.stats.cache` * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] responses: 200: description: "no error" schema: type: "object" examples: application/json: read: "2015-01-08T22:57:31.547920715Z" pids_stats: current: 3 networks: eth0: rx_bytes: 5338 rx_dropped: 0 rx_errors: 0 rx_packets: 36 tx_bytes: 648 tx_dropped: 0 tx_errors: 0 tx_packets: 8 eth5: rx_bytes: 4641 rx_dropped: 0 rx_errors: 0 rx_packets: 26 tx_bytes: 690 tx_dropped: 0 tx_errors: 0 tx_packets: 9 memory_stats: stats: total_pgmajfault: 0 cache: 0 mapped_file: 0 total_inactive_file: 0 pgpgout: 414 rss: 6537216 total_mapped_file: 0 writeback: 0 unevictable: 0 pgpgin: 477 total_unevictable: 0 pgmajfault: 0 total_rss: 6537216 total_rss_huge: 6291456 total_writeback: 0 total_inactive_anon: 0 rss_huge: 6291456 hierarchical_memory_limit: 67108864 total_pgfault: 964 total_active_file: 0 active_anon: 6537216 total_active_anon: 6537216 total_pgpgout: 414 total_cache: 0 inactive_anon: 0 active_file: 0 pgfault: 964 inactive_file: 0 total_pgpgin: 477 max_usage: 6651904 usage: 6537216 failcnt: 0 limit: 67108864 blkio_stats: {} cpu_stats: cpu_usage: percpu_usage: - 8646879 - 24472255 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100215355 usage_in_kernelmode: 30000000 system_cpu_usage: 739306590000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 precpu_stats: cpu_usage: percpu_usage: - 8646879 - 24350896 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100093996 usage_in_kernelmode: 30000000 system_cpu_usage: 9492140000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "stream" in: "query" description: | Stream the output. If false, the stats will be output once and then it will disconnect. type: "boolean" default: true - name: "one-shot" in: "query" description: | Only get a single stat instead of waiting for 2 cycles. Must be used with `stream=false`. type: "boolean" default: false tags: ["Container"] /containers/{id}/resize: post: summary: "Resize a container TTY" description: "Resize the TTY for a container." 
operationId: "ContainerResize" consumes: - "application/octet-stream" produces: - "text/plain" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "cannot resize container" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] /containers/{id}/start: post: summary: "Start a container" operationId: "ContainerStart" responses: 204: description: "no error" 304: description: "container already started" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" tags: ["Container"] /containers/{id}/stop: post: summary: "Stop a container" operationId: "ContainerStop" responses: 204: description: "no error" 304: description: "container already stopped" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/restart: post: summary: "Restart a container" operationId: "ContainerRestart" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/kill: post: summary: "Kill a container" description: | Send a POSIX signal to a container, defaulting to killing to the container. 
operationId: "ContainerKill" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is not running" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "signal" in: "query" description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" type: "string" default: "SIGKILL" tags: ["Container"] /containers/{id}/update: post: summary: "Update a container" description: | Change various configuration options of a container without having to recreate it. operationId: "ContainerUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "The container has been updated." schema: type: "object" title: "ContainerUpdateResponse" description: "OK response to ContainerUpdate operation" properties: Warnings: type: "array" items: type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "update" in: "body" required: true schema: allOf: - $ref: "#/definitions/Resources" - type: "object" properties: RestartPolicy: $ref: "#/definitions/RestartPolicy" example: BlkioWeight: 300 CpuShares: 512 CpuPeriod: 100000 CpuQuota: 50000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpusetCpus: "0,1" CpusetMems: "0" Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" tags: ["Container"] /containers/{id}/rename: post: summary: "Rename a container" operationId: "ContainerRename" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "name already in use" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "name" in: "query" required: true description: "New name for the container" type: "string" tags: ["Container"] /containers/{id}/pause: post: summary: "Pause a container" description: | Use the freezer cgroup to suspend all processes in a container. Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
operationId: "ContainerPause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/unpause: post: summary: "Unpause a container" description: "Resume a container which has been paused." operationId: "ContainerUnpause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/attach: post: summary: "Attach to a container" description: | Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. ### Hijacking This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. This is the response from the daemon for an attach request: ``` HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream [STREAM] ``` After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. For example, the client sends this request to upgrade the connection: ``` POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 Upgrade: tcp Connection: Upgrade ``` The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp [STREAM] ``` ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: ```go header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} ``` `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. The simplest way to implement this protocol is the following: 1. Read 8 bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. 
### Stream format when using a TTY When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" - name: "logs" in: "query" description: | Replay previous logs from the container. This is useful for attaching to a container that has started and you want to output everything since the container started. If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. type: "boolean" default: false - name: "stream" in: "query" description: | Stream attached streams from the time the request was made onwards. type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/attach/ws: get: summary: "Attach to a container via a websocket" operationId: "ContainerAttachWebsocket" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`. type: "string" - name: "logs" in: "query" description: "Return logs" type: "boolean" default: false - name: "stream" in: "query" description: "Return stream" type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/wait: post: summary: "Wait for a container" description: "Block until a container stops, then returns the exit code." 
operationId: "ContainerWait" produces: ["application/json"] responses: 200: description: "The container has exit." schema: type: "object" title: "ContainerWaitResponse" description: "OK response to ContainerWait operation" required: [StatusCode] properties: StatusCode: description: "Exit code of the container" type: "integer" x-nullable: false Error: description: "container waiting error, if any" type: "object" properties: Message: description: "Details of an error" type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "condition" in: "query" description: | Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'. type: "string" default: "not-running" tags: ["Container"] /containers/{id}: delete: summary: "Remove a container" operationId: "ContainerDelete" responses: 204: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: | You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "v" in: "query" description: "Remove anonymous volumes associated with the container." type: "boolean" default: false - name: "force" in: "query" description: "If the container is running, kill it before removing it." type: "boolean" default: false - name: "link" in: "query" description: "Remove the specified link associated with the container." type: "boolean" default: false tags: ["Container"] /containers/{id}/archive: head: summary: "Get information about files in a container" description: | A response header `X-Docker-Container-Path-Stat` is returned, containing a base64 - encoded JSON object with some filesystem header information about the path. operationId: "ContainerArchiveInfo" responses: 200: description: "no error" headers: X-Docker-Container-Path-Stat: type: "string" description: | A base64 - encoded JSON object with some filesystem header information about the path 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." 
type: "string" tags: ["Container"] get: summary: "Get an archive of a filesystem resource in a container" description: "Get a tar archive of a resource in the filesystem of container id." operationId: "ContainerArchive" produces: ["application/x-tar"] responses: 200: description: "no error" 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." type: "string" tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" description: "Upload a tar archive to be extracted to a path in the filesystem of container id." operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: 200: description: "The content was extracted successfully" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such container or path does not exist inside the container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Path to a directory in the container to extract the archive’s contents into. " type: "string" - name: "noOverwriteDirNonDir" in: "query" description: | If `1`, `true`, or `True` then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. type: "string" - name: "copyUIDGID" in: "query" description: | If `1`, `true`, then it will copy UID/GID maps to the dest file or dir type: "string" - name: "inputStream" in: "body" required: true description: | The input stream must be a tar archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, or `xz`. schema: type: "string" format: "binary" tags: ["Container"] /containers/prune: post: summary: "Delete stopped containers" produces: - "application/json" operationId: "ContainerPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ContainerPruneResponse" properties: ContainersDeleted: description: "Container IDs that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /images/json: get: summary: "List Images" description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." operationId: "ImageList" produces: - "application/json" responses: 200: description: "Summary image data for the images matching the query" schema: type: "array" items: $ref: "#/definitions/ImageSummary" examples: application/json: - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ParentId: "" RepoTags: - "ubuntu:12.04" - "ubuntu:precise" RepoDigests: - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" Created: 1474925151 Size: 103579269 VirtualSize: 103579269 SharedSize: 0 Labels: {} Containers: 2 - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" ParentId: "" RepoTags: - "ubuntu:12.10" - "ubuntu:quantal" RepoDigests: - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" Created: 1403128455 Size: 172064416 VirtualSize: 172064416 SharedSize: 0 Labels: {} Containers: 5 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "all" in: "query" description: "Show all images. Only images from a final layer (no children) are shown by default." type: "boolean" default: false - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - `dangling=true` - `label=key` or `label="key=value"` of an image label - `reference`=(`<image-name>[:<tag>]`) - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) type: "string" - name: "shared-size" in: "query" description: "Compute and show shared size as a `SharedSize` field on each image." type: "boolean" default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false tags: ["Image"] /build: post: summary: "Build an image" description: | Build an image from a tar archive with a `Dockerfile` in it. The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. The build is canceled if the client drops the connection by quitting or being killed. 
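The `GET /images/json` endpoint above accepts its `filters` parameter as a JSON-encoded `map[string][]string`. The sketch below (plain Go standard library, default socket path assumed) shows one way to build that query string and decode a few fields of the response; the `dangling=true` filter is just an example.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
	"net/url"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Filters are passed as a JSON-encoded map[string][]string in the query string.
	filters, _ := json.Marshal(map[string][]string{"dangling": {"true"}})
	u := "http://localhost/images/json?filters=" + url.QueryEscape(string(filters))

	resp, err := cli.Get(u)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Decode only a subset of the ImageSummary fields for brevity.
	var images []struct {
		Id       string
		RepoTags []string
		Size     int64
	}
	if err := json.NewDecoder(resp.Body).Decode(&images); err != nil {
		log.Fatal(err)
	}
	for _, img := range images {
		fmt.Println(img.Id, img.RepoTags, img.Size)
	}
}
```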
operationId: "ImageBuild" consumes: - "application/octet-stream" produces: - "application/json" parameters: - name: "inputStream" in: "body" description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." schema: type: "string" format: "binary" - name: "dockerfile" in: "query" description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." type: "string" default: "Dockerfile" - name: "t" in: "query" description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." type: "string" - name: "extrahosts" in: "query" description: "Extra hosts to add to /etc/hosts" type: "string" - name: "remote" in: "query" description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." type: "string" - name: "q" in: "query" description: "Suppress verbose build output." type: "boolean" default: false - name: "nocache" in: "query" description: "Do not use the cache when building the image." type: "boolean" default: false - name: "cachefrom" in: "query" description: "JSON array of images used for build cache resolution." type: "string" - name: "pull" in: "query" description: "Attempt to pull the image even if an older image exists locally." type: "string" - name: "rm" in: "query" description: "Remove intermediate containers after a successful build." type: "boolean" default: true - name: "forcerm" in: "query" description: "Always remove intermediate containers, even upon failure." type: "boolean" default: false - name: "memory" in: "query" description: "Set memory limit for build." type: "integer" - name: "memswap" in: "query" description: "Total memory (memory + swap). Set as `-1` to disable swap." type: "integer" - name: "cpushares" in: "query" description: "CPU shares (relative weight)." type: "integer" - name: "cpusetcpus" in: "query" description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." type: "string" - name: "cpuperiod" in: "query" description: "The length of a CPU period in microseconds." type: "integer" - name: "cpuquota" in: "query" description: "Microseconds of CPU time that the container can get in a CPU period." type: "integer" - name: "buildargs" in: "query" description: > JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) type: "string" - name: "shmsize" in: "query" description: "Size of `/dev/shm` in bytes. The size must be greater than 0. 
If omitted the system uses 64MB." type: "integer" - name: "squash" in: "query" description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" type: "boolean" - name: "labels" in: "query" description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." type: "string" - name: "networkmode" in: "query" description: | Sets the networking mode for the run commands during build. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name or ID to which this container should connect to. type: "string" - name: "Content-type" in: "header" type: "string" enum: - "application/x-tar" default: "application/x-tar" - name: "X-Registry-Config" in: "header" description: | This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: ``` { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } ``` Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" - name: "target" in: "query" description: "Target build stage" type: "string" default: "" - name: "outputs" in: "query" description: "BuildKit output configuration" type: "string" default: "" responses: 200: description: "no error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /build/prune: post: summary: "Delete builder cache" produces: - "application/json" operationId: "BuildPrune" parameters: - name: "keep-storage" in: "query" description: "Amount of disk space in bytes to keep for cache" type: "integer" format: "int64" - name: "all" in: "query" type: "boolean" description: "Remove all types of build cache" - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - `id=<id>` - `parent=<id>` - `type=<string>` - `description=<string>` - `inuse` - `shared` - `private` responses: 200: description: "No error" schema: type: "object" title: "BuildPruneResponse" properties: CachesDeleted: type: "array" items: description: "ID of build cache object" type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /images/create: post: summary: "Create an image" description: "Create an image by either pulling it from a registry or importing it." 
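Because `POST /build` expects the build context as a tar archive in the request body and `buildargs` as a URI-component-encoded JSON map, a tiny end-to-end sketch may help. It is illustrative only: the in-memory Dockerfile, the `example:latest` tag and the `FOO=bar` build argument are made-up values, the default Unix socket is assumed, and most error handling is elided.

```go
package main

import (
	"archive/tar"
	"bytes"
	"context"
	"encoding/json"
	"io"
	"log"
	"net"
	"net/http"
	"net/url"
	"os"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Build context: an in-memory tar archive holding a minimal Dockerfile.
	dockerfile := []byte("FROM busybox\nARG FOO\nRUN echo $FOO\n")
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0o644, Size: int64(len(dockerfile))})
	tw.Write(dockerfile)
	tw.Close()

	// buildargs is a JSON map that must be URI-component encoded.
	buildargs := url.QueryEscape(`{"FOO":"bar"}`)
	u := "http://localhost/build?t=example%3Alatest&buildargs=" + buildargs

	req, _ := http.NewRequest(http.MethodPost, u, &buf)
	req.Header.Set("Content-Type", "application/x-tar")

	resp, err := cli.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The response body is a stream of JSON progress messages.
	dec := json.NewDecoder(resp.Body)
	for {
		var msg map[string]interface{}
		if err := dec.Decode(&msg); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		json.NewEncoder(os.Stdout).Encode(msg)
	}
}
```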
operationId: "ImageCreate" consumes: - "text/plain" - "application/octet-stream" produces: - "application/json" responses: 200: description: "no error" 404: description: "repository does not exist or no read access" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "fromImage" in: "query" description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." type: "string" - name: "fromSrc" in: "query" description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." type: "string" - name: "repo" in: "query" description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." type: "string" - name: "tag" in: "query" description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." type: "string" - name: "message" in: "query" description: "Set commit message for imported image." type: "string" - name: "inputImage" in: "body" description: "Image content if the value `-` has been specified in fromSrc query parameter" schema: type: "string" required: false - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "changes" in: "query" description: | Apply `Dockerfile` instructions to the image that is created, for example: `changes=ENV DEBUG=true`. Note that `ENV DEBUG=true` should be URI component encoded. Supported `Dockerfile` instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` type: "array" items: type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" tags: ["Image"] /images/{name}/json: get: summary: "Inspect an image" description: "Return low-level information about an image." 
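A minimal sketch of pulling an image through `POST /images/create` follows; it assumes the default Unix socket, uses `alpine:latest` purely as an example, and omits the `X-Registry-Auth` header that a private registry would require. The response body is a stream of JSON progress messages, printed here line by line.

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"log"
	"net"
	"net/http"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Pull a public image; no request body is needed when pulling.
	resp, err := cli.Post("http://localhost/images/create?fromImage=alpine&tag=latest", "text/plain", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Print each JSON progress message as it arrives.
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}
```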
operationId: "ImageInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Image" examples: application/json: Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" Comment: "" Os: "linux" Architecture: "amd64" Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" ContainerConfig: Tty: false Hostname: "e611e15f9c9d" Domainname: "" AttachStdout: false PublishService: "" AttachStdin: false OpenStdin: false StdinOnce: false NetworkDisabled: false OnBuild: [] Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" User: "" WorkingDir: "" MacAddress: "" AttachStderr: false Labels: com.example.license: "GPL" com.example.version: "1.0" com.example.vendor: "Acme" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: - "/bin/sh" - "-c" - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" DockerVersion: "1.9.0-dev" VirtualSize: 188359297 Size: 0 Author: "" Created: "2015-09-10T08:30:53.26995814Z" GraphDriver: Name: "aufs" Data: {} RepoDigests: - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" RepoTags: - "example:1.0" - "example:latest" - "example:stable" Config: Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" NetworkDisabled: false OnBuild: [] StdinOnce: false PublishService: "" AttachStdin: false OpenStdin: false Domainname: "" AttachStdout: false Tty: false Hostname: "e611e15f9c9d" Cmd: - "/bin/bash" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Labels: com.example.vendor: "Acme" com.example.version: "1.0" com.example.license: "GPL" MacAddress: "" AttachStderr: false WorkingDir: "" User: "" RootFS: Type: "layers" Layers: - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Image"] /images/{name}/history: get: summary: "Get the history of an image" description: "Return parent layers of an image." 
operationId: "ImageHistory" produces: ["application/json"] responses: 200: description: "List of image layers" schema: type: "array" items: type: "object" x-go-name: HistoryResponseItem title: "HistoryResponseItem" description: "individual image layer information in response to ImageHistory operation" required: [Id, Created, CreatedBy, Tags, Size, Comment] properties: Id: type: "string" x-nullable: false Created: type: "integer" format: "int64" x-nullable: false CreatedBy: type: "string" x-nullable: false Tags: type: "array" items: type: "string" Size: type: "integer" format: "int64" x-nullable: false Comment: type: "string" x-nullable: false examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" Created: 1398108230 CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" Tags: - "ubuntu:lucid" - "ubuntu:10.04" Size: 182964289 Comment: "" - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" Created: 1398108222 CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <[email protected]> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" Tags: [] Size: 0 Comment: "" - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" Created: 1371157430 CreatedBy: "" Tags: - "scratch12:latest" - "scratch:latest" Size: 0 Comment: "Imported from -" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/{name}/push: post: summary: "Push an image" description: | Push an image to a registry. If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. The push is cancelled if the HTTP connection is closed. operationId: "ImagePush" consumes: - "application/octet-stream" responses: 200: description: "No error" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID." type: "string" required: true - name: "tag" in: "query" description: "The tag to associate with the image on the registry." type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" required: true tags: ["Image"] /images/{name}/tag: post: summary: "Tag an image" description: "Tag an image so that it becomes part of a repository." operationId: "ImageTag" responses: 201: description: "No error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID to tag." type: "string" required: true - name: "repo" in: "query" description: "The repository to tag in. For example, `someuser/someimage`." type: "string" - name: "tag" in: "query" description: "The name of the new tag." 
type: "string" tags: ["Image"] /images/{name}: delete: summary: "Remove an image" description: | Remove an image, along with any untagged parent images that were referenced by that image. Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. operationId: "ImageDelete" produces: ["application/json"] responses: 200: description: "The image was deleted successfully" schema: type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" examples: application/json: - Untagged: "3e2f21a89f" - Deleted: "3e2f21a89f" - Deleted: "53b4f83ac9" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true - name: "force" in: "query" description: "Remove the image even if it is being used by stopped containers or has other tags" type: "boolean" default: false - name: "noprune" in: "query" description: "Do not delete untagged parent images" type: "boolean" default: false tags: ["Image"] /images/search: get: summary: "Search images" description: "Search for an image on Docker Hub." operationId: "ImageSearch" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: type: "object" title: "ImageSearchResponseItem" properties: description: type: "string" is_official: type: "boolean" is_automated: type: "boolean" name: type: "string" star_count: type: "integer" examples: application/json: - description: "" is_official: false is_automated: false name: "wma55/u1210sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "jdswinbank/sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "vgauthier/sshd" star_count: 0 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "term" in: "query" description: "Term to search" type: "string" required: true - name: "limit" in: "query" description: "Maximum number of results to return" type: "integer" - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `is-automated=(true|false)` - `is-official=(true|false)` - `stars=<number>` Matches images that has at least 'number' stars. type: "string" tags: ["Image"] /images/prune: post: summary: "Delete unused images" produces: - "application/json" operationId: "ImagePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `dangling=<boolean>` When set to `true` (or `1`), prune only unused *and* untagged images. When set to `false` (or `0`), all unused images are pruned. - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ImagePruneResponse" properties: ImagesDeleted: description: "Images that were deleted" type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /auth: post: summary: "Check auth configuration" description: | Validate credentials for a registry and, if available, get an identity token for accessing the registry without password. operationId: "SystemAuth" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "An identity token was generated successfully." schema: type: "object" title: "SystemAuthResponse" required: [Status] properties: Status: description: "The status of the authentication" type: "string" x-nullable: false IdentityToken: description: "An opaque token used to authenticate a user after a successful login" type: "string" x-nullable: false examples: application/json: Status: "Login Succeeded" IdentityToken: "9cbaf023786cd7..." 204: description: "No error" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "authConfig" in: "body" description: "Authentication to check" schema: $ref: "#/definitions/AuthConfig" tags: ["System"] /info: get: summary: "Get system information" operationId: "SystemInfo" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/SystemInfo" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /version: get: summary: "Get version" description: "Returns the version of Docker that is running and various information about the system that Docker is running on." operationId: "SystemVersion" produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/SystemVersion" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /_ping: get: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." operationId: "SystemPing" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "OK" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" headers: Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" tags: ["System"] head: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." 
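To illustrate the `/_ping` and `/version` endpoints above, here is a short Go sketch over the default Unix socket. Only a handful of fields are decoded from the version response; the struct is intentionally partial.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// GET /_ping returns "OK" plus headers advertising daemon capabilities.
	resp, err := cli.Get("http://localhost/_ping")
	if err != nil {
		log.Fatal(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Printf("ping: %s (API-Version: %s)\n", body, resp.Header.Get("API-Version"))

	// GET /version returns structured version information.
	resp, err = cli.Get("http://localhost/version")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	var v struct {
		Version    string
		ApiVersion string
		Os         string
		Arch       string
	}
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("engine %s, API %s, %s/%s\n", v.Version, v.ApiVersion, v.Os, v.Arch)
}
```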
operationId: "SystemPingHead" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "(empty)" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /commit: post: summary: "Create a new image from a container" operationId: "ImageCommit" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "containerConfig" in: "body" description: "The container configuration" schema: $ref: "#/definitions/ContainerConfig" - name: "container" in: "query" description: "The ID or name of the container to commit" type: "string" - name: "repo" in: "query" description: "Repository name for the created image" type: "string" - name: "tag" in: "query" description: "Tag name for the created image" type: "string" - name: "comment" in: "query" description: "Commit message" type: "string" - name: "author" in: "query" description: "Author of the image (e.g., `John Hannibal Smith <[email protected]>`)" type: "string" - name: "pause" in: "query" description: "Whether to pause the container before committing" type: "boolean" default: true - name: "changes" in: "query" description: "`Dockerfile` instructions to apply while committing" type: "string" tags: ["Image"] /events: get: summary: "Monitor events" description: | Stream real-time events from the server. Various objects within Docker report events when something happens to them.
Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` The Docker daemon reports these events: `reload` Services report these events: `create`, `update`, and `remove` Nodes report these events: `create`, `update`, and `remove` Secrets report these events: `create`, `update`, and `remove` Configs report these events: `create`, `update`, and `remove` The Builder reports `prune` events operationId: "SystemEvents" produces: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "SystemEventsResponse" properties: Type: description: "The type of object emitting the event" type: "string" Action: description: "The type of event" type: "string" Actor: type: "object" properties: ID: description: "The ID of the object emitting the event" type: "string" Attributes: description: "Various key/value attributes of the object, depending on its type" type: "object" additionalProperties: type: "string" time: description: "Timestamp of event" type: "integer" timeNano: description: "Timestamp of event, with nanosecond accuracy" type: "integer" format: "int64" examples: application/json: Type: "container" Action: "create" Actor: ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" Attributes: com.example.some-label: "some-label-value" image: "alpine" name: "my-container" time: 1461943101 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "since" in: "query" description: "Show events created since this timestamp then stream new events." type: "string" - name: "until" in: "query" description: "Show events created until this timestamp then stop streaming." type: "string" - name: "filters" in: "query" description: | A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: - `config=<string>` config name or ID - `container=<string>` container name or ID - `daemon=<string>` daemon name or ID - `event=<string>` event type - `image=<string>` image name or ID - `label=<string>` image or container label - `network=<string>` network name or ID - `node=<string>` node ID - `plugin`=<string> plugin name or ID - `scope`=<string> local or swarm - `secret=<string>` secret name or ID - `service=<string>` service name or ID - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - `volume=<string>` volume name type: "string" tags: ["System"] /system/df: get: summary: "Get data usage information" operationId: "SystemDataUsage" responses: 200: description: "no error" schema: type: "object" title: "SystemDataUsageResponse" properties: LayersSize: type: "integer" format: "int64" Images: type: "array" items: $ref: "#/definitions/ImageSummary" Containers: type: "array" items: $ref: "#/definitions/ContainerSummary" Volumes: type: "array" items: $ref: "#/definitions/Volume" BuildCache: type: "array" items: $ref: "#/definitions/BuildCache" example: LayersSize: 1092588 Images: - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" ParentId: "" RepoTags: - "busybox:latest" RepoDigests: - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" Created: 1466724217 Size: 1092588 SharedSize: 0 VirtualSize: 1092588 Labels: {} Containers: 1 Containers: - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" Names: - "/top" Image: "busybox" ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" Command: "top" Created: 1472592424 Ports: [] SizeRootFs: 1092588 Labels: {} State: "exited" Status: "Exited (0) 56 minutes ago" HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: IPAMConfig: null Links: null Aliases: null NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" Gateway: "172.18.0.1" IPAddress: "172.18.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Mounts: [] Volumes: - Name: "my-volume" Driver: "local" Mountpoint: "/var/lib/docker/volumes/my-volume/_data" Labels: null Scope: "local" Options: null UsageData: Size: 10920104 RefCount: 2 BuildCache: - ID: "hw53o5aio51xtltp5xjp8v7fx" Parent: "" Type: "regular" Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" InUse: false Shared: true Size: 0 CreatedAt: "2021-06-28T13:31:01.474619385Z" LastUsedAt: "2021-07-07T22:02:32.738075951Z" UsageCount: 26 - ID: "ndlpt0hhvkqcdfkputsk4cq9c" Parent: "hw53o5aio51xtltp5xjp8v7fx" Type: "regular" Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: false Shared: true Size: 51 CreatedAt: "2021-06-28T13:31:03.002625487Z" LastUsedAt: "2021-07-07T22:02:32.773909517Z" UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "type" in: "query" description: | Object types, for which to compute and return data. 
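A sketch of consuming the `/events` endpoint described above: the daemon holds the connection open and writes one JSON event per line, so a line scanner is enough for a simple consumer. The `type=container` filter and the default socket path are assumptions made for the example.

```go
package main

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
	"net/url"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Only watch container events; filters are a JSON map[string][]string.
	filters, _ := json.Marshal(map[string][]string{"type": {"container"}})
	u := "http://localhost/events?filters=" + url.QueryEscape(string(filters))

	resp, err := cli.Get(u)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The daemon keeps the connection open and writes one JSON event per line.
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		var ev struct {
			Type   string
			Action string
			Actor  struct{ ID string }
		}
		if err := json.Unmarshal(sc.Bytes(), &ev); err != nil {
			log.Fatal(err)
		}
		fmt.Println(ev.Type, ev.Action, ev.Actor.ID)
	}
}
```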
type: "array" collectionFormat: multi items: type: "string" enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: summary: "Export an image" description: | Get a tarball containing all images and metadata for a repository. If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ```json { "hello-world": { "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" } } ``` operationId: "ImageGet" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/get: get: summary: "Export several images" description: | Get a tarball containing all images and metadata for several image repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageGetAll" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "names" in: "query" description: "Image names to filter by" type: "array" items: type: "string" tags: ["Image"] /images/load: post: summary: "Import images" description: | Load a set of images and tags into a repository. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageLoad" consumes: - "application/x-tar" produces: - "application/json" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "imagesTarball" in: "body" description: "Tar archive containing images" schema: type: "string" format: "binary" - name: "quiet" in: "query" description: "Suppress progress details during load." type: "boolean" default: false tags: ["Image"] /containers/{id}/exec: post: summary: "Create an exec instance" description: "Run a command inside a running container." 
operationId: "ContainerExec" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is paused" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execConfig" in: "body" description: "Exec configuration" schema: type: "object" title: "ExecConfig" properties: AttachStdin: type: "boolean" description: "Attach to `stdin` of the exec command." AttachStdout: type: "boolean" description: "Attach to `stdout` of the exec command." AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." DetachKeys: type: "string" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Tty: type: "boolean" description: "Allocate a pseudo-TTY." Env: description: | A list of environment variables in the form `["VAR=value", ...]`. type: "array" items: type: "string" Cmd: type: "array" description: "Command to run, as a string or array of strings." items: type: "string" Privileged: type: "boolean" description: "Runs the exec process with extended privileges." default: false User: type: "string" description: | The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`. WorkingDir: type: "string" description: | The working directory for the exec process inside the container. example: AttachStdin: false AttachStdout: true AttachStderr: true DetachKeys: "ctrl-p,ctrl-q" Tty: false Cmd: - "date" Env: - "FOO=bar" - "BAZ=quux" required: true - name: "id" in: "path" description: "ID or name of container" type: "string" required: true tags: ["Exec"] /exec/{id}/start: post: summary: "Start an exec instance" description: | Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command. operationId: "ExecStart" consumes: - "application/json" produces: - "application/vnd.docker.raw-stream" responses: 200: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Container is stopped or paused" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execStartConfig" in: "body" schema: type: "object" title: "ExecStartConfig" properties: Detach: type: "boolean" description: "Detach from the command." Tty: type: "boolean" description: "Allocate a pseudo-TTY." example: Detach: false Tty: false - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /exec/{id}/resize: post: summary: "Resize an exec instance" description: | Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance. 
operationId: "ExecResize" responses: 201: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] /exec/{id}/json: get: summary: "Inspect an exec instance" description: "Return low-level information about an exec instance." operationId: "ExecInspect" produces: - "application/json" responses: 200: description: "No error" schema: type: "object" title: "ExecInspectResponse" properties: CanRemove: type: "boolean" DetachKeys: type: "string" ID: type: "string" Running: type: "boolean" ExitCode: type: "integer" ProcessConfig: $ref: "#/definitions/ProcessConfig" OpenStdin: type: "boolean" OpenStderr: type: "boolean" OpenStdout: type: "boolean" ContainerID: type: "string" Pid: type: "integer" description: "The system process ID for the exec process." examples: application/json: CanRemove: false ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" DetachKeys: "" ExitCode: 2 ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" OpenStderr: true OpenStdin: true OpenStdout: true ProcessConfig: arguments: - "-c" - "exit 2" entrypoint: "sh" privileged: false tty: true user: "1000" Running: false Pid: 42000 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /volumes: get: summary: "List volumes" operationId: "VolumeList" produces: ["application/json"] responses: 200: description: "Summary volume data that matches the query" schema: type: "object" title: "VolumeListResponse" description: "Volume list response" required: [Volumes, Warnings] properties: Volumes: type: "array" x-nullable: false description: "List of volumes" items: $ref: "#/definitions/Volume" Warnings: type: "array" x-nullable: false description: | Warnings that occurred when fetching the list of volumes. items: type: "string" examples: application/json: Volumes: - CreatedAt: "2017-07-19T12:00:26Z" Name: "tardis" Driver: "local" Mountpoint: "/var/lib/docker/volumes/tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" Options: device: "tmpfs" o: "size=100m,uid=1000" type: "tmpfs" Warnings: [] 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are not in use by a container. When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. - `driver=<volume-driver-name>` Matches volumes based on their driver. - `label=<key>` or `label=<key>:<value>` Matches volumes based on the presence of a `label` alone or a `label` and a value. - `name=<volume-name>` Matches all or part of a volume name. 
type: "string" format: "json" tags: ["Volume"] /volumes/create: post: summary: "Create a volume" operationId: "VolumeCreate" consumes: ["application/json"] produces: ["application/json"] responses: 201: description: "The volume was created successfully" schema: $ref: "#/definitions/Volume" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "volumeConfig" in: "body" required: true description: "Volume configuration" schema: type: "object" description: "Volume configuration" title: "VolumeConfig" properties: Name: description: | The new volume's name. If not specified, Docker generates a name. type: "string" x-nullable: false Driver: description: "Name of the volume driver to use." type: "string" default: "local" x-nullable: false DriverOpts: description: | A mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: Name: "tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Driver: "custom" tags: ["Volume"] /volumes/{name}: get: summary: "Inspect a volume" operationId: "VolumeInspect" produces: ["application/json"] responses: 200: description: "No error" schema: $ref: "#/definitions/Volume" 404: description: "No such volume" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" tags: ["Volume"] delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." operationId: "VolumeDelete" responses: 204: description: "The volume was removed" 404: description: "No such volume or volume driver" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Volume is in use and cannot be removed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" - name: "force" in: "query" description: "Force the removal of the volume" type: "boolean" default: false tags: ["Volume"] /volumes/prune: post: summary: "Delete unused volumes" produces: - "application/json" operationId: "VolumePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: description: "No error" schema: type: "object" title: "VolumePruneResponse" properties: VolumesDeleted: description: "Volumes that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Volume"] /networks: get: summary: "List networks" description: | Returns a list of networks. For details on the format, see the [network inspect endpoint](#operation/NetworkInspect). Note that it uses a different, smaller representation of a network than inspecting a single network. 
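The volume endpoints above can be exercised with a few plain HTTP calls. The sketch below creates a labelled local volume via `POST /volumes/create` and removes it again with `DELETE /volumes/{name}`; the volume name and label are placeholders, and the default Unix socket is assumed.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
	"strings"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Create a local volume with a single label.
	body := `{"Name": "example-volume", "Driver": "local", "Labels": {"com.example.purpose": "demo"}}`
	resp, err := cli.Post("http://localhost/volumes/create", "application/json", strings.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	var vol struct {
		Name       string
		Mountpoint string
	}
	json.NewDecoder(resp.Body).Decode(&vol)
	resp.Body.Close()
	fmt.Println("created", vol.Name, "at", vol.Mountpoint)

	// Remove it again; a 204 status indicates success.
	req, _ := http.NewRequest(http.MethodDelete, "http://localhost/volumes/"+vol.Name, nil)
	resp, err = cli.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Println("delete status:", resp.StatusCode)
}
```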
For example, the list of containers attached to the network is not propagated in API versions 1.28 and up. operationId: "NetworkList" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Network" examples: application/json: - Name: "bridge" Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" Created: "2016-10-19T06:21:00.416543526Z" Scope: "local" Driver: "bridge" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: - Subnet: "172.17.0.0/16" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" - Name: "none" Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "null" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} - Name: "host" Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "host" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all networks that are not in use by a container. When set to `false` (or `0`), only networks that are in use by one or more containers are returned. - `driver=<driver-name>` Matches a network's driver. - `id=<network-id>` Matches all or part of a network ID. - `label=<key>` or `label=<key>=<value>` of a network label. - `name=<network-name>` Matches all or part of a network name. - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
type: "string" tags: ["Network"] /networks/{id}: get: summary: "Inspect a network" operationId: "NetworkInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Network" 404: description: "Network not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "verbose" in: "query" description: "Detailed inspect output for troubleshooting" type: "boolean" default: false - name: "scope" in: "query" description: "Filter the network by scope (swarm, global, or local)" type: "string" tags: ["Network"] delete: summary: "Remove a network" operationId: "NetworkDelete" responses: 204: description: "No error" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such network" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" tags: ["Network"] /networks/create: post: summary: "Create a network" operationId: "NetworkCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "No error" schema: type: "object" title: "NetworkCreateResponse" properties: Id: description: "The ID of the created network." type: "string" Warning: type: "string" example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "plugin not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "networkConfig" in: "body" description: "Network configuration" required: true schema: type: "object" title: "NetworkCreateRequest" required: ["Name"] properties: Name: description: "The network's name." type: "string" CheckDuplicate: description: | Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions. type: "boolean" Driver: description: "Name of the network driver plugin to use." type: "string" default: "bridge" Internal: description: "Restrict external access to the network." type: "boolean" Attachable: description: | Globally scoped network is manually attachable by regular containers from workers in swarm mode. type: "boolean" Ingress: description: | Ingress network is the network which provides the routing-mesh in swarm mode. type: "boolean" IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" Options: description: "Network specific options to be used by the drivers." type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: Name: "isolated_nw" CheckDuplicate: false Driver: "bridge" EnableIPv6: true IPAM: Driver: "default" Config: - Subnet: "172.20.0.0/16" IPRange: "172.20.10.0/24" Gateway: "172.20.10.11" - Subnet: "2001:db8:abcd::/64" Gateway: "2001:db8:abcd::1011" Options: foo: "bar" Internal: true Attachable: false Ingress: false Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" tags: ["Network"] /networks/{id}/connect: post: summary: "Connect a container to a network" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkConnectRequest" properties: Container: type: "string" description: "The ID or name of the container to connect to the network." EndpointConfig: $ref: "#/definitions/EndpointSettings" example: Container: "3613f73ba0e4" EndpointConfig: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" tags: ["Network"] /networks/{id}/disconnect: post: summary: "Disconnect a container from a network" operationId: "NetworkDisconnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkDisconnectRequest" properties: Container: type: "string" description: | The ID or name of the container to disconnect from the network. Force: type: "boolean" description: | Force the container to disconnect from the network. tags: ["Network"] /networks/prune: post: summary: "Delete unused networks" produces: - "application/json" operationId: "NetworkPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
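To tie the network endpoints together, the following sketch creates a user-defined bridge network with `POST /networks/create` and then attaches an existing container to it via `POST /networks/{id}/connect`. The network name `example-net` and the container ID read from the command line are placeholders; the default Unix socket is assumed.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
	"os"
	"strings"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	containerID := os.Args[1] // container to connect (placeholder input)

	// Create a user-defined bridge network.
	createBody := `{"Name": "example-net", "Driver": "bridge", "CheckDuplicate": true}`
	resp, err := cli.Post("http://localhost/networks/create", "application/json", strings.NewReader(createBody))
	if err != nil {
		log.Fatal(err)
	}
	var created struct {
		Id      string
		Warning string
	}
	json.NewDecoder(resp.Body).Decode(&created)
	resp.Body.Close()
	fmt.Println("network id:", created.Id)

	// Connect the container to the new network; 200 indicates success.
	connectBody := `{"Container": "` + containerID + `"}`
	resp, err = cli.Post("http://localhost/networks/"+created.Id+"/connect", "application/json", strings.NewReader(connectBody))
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Println("connect status:", resp.StatusCode)
}
```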
type: "string" responses: 200: description: "No error" schema: type: "object" title: "NetworkPruneResponse" properties: NetworksDeleted: description: "Networks that were deleted" type: "array" items: type: "string" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Network"] /plugins: get: summary: "List plugins" operationId: "PluginList" description: "Returns information about installed plugins." produces: ["application/json"] responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Plugin" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - `capability=<capability name>` - `enable=<true>|<false>` tags: ["Plugin"] /plugins/privileges: get: summary: "Get plugin privileges" operationId: "GetPluginPrivileges" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: - "Plugin" /plugins/pull: post: summary: "Install a plugin" operationId: "PluginPull" description: | Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). produces: - "application/json" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | Remote reference for plugin to install. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "name" in: "query" description: | Local name for the pulled plugin. The `:latest` tag is optional, and is used as the default if omitted. required: false type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/{name}/json: get: summary: "Inspect a plugin" operationId: "PluginInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" tags: ["Plugin"] /plugins/{name}: delete: summary: "Remove a plugin" operationId: "PluginDelete" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "force" in: "query" description: | Disable the plugin before removing. This may result in issues if the plugin is in use by a container. type: "boolean" default: false tags: ["Plugin"] /plugins/{name}/enable: post: summary: "Enable a plugin" operationId: "PluginEnable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "timeout" in: "query" description: "Set the HTTP client timeout (in seconds)" type: "integer" default: 0 tags: ["Plugin"] /plugins/{name}/disable: post: summary: "Disable a plugin" operationId: "PluginDisable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: ["Plugin"] /plugins/{name}/upgrade: post: summary: "Upgrade a plugin" operationId: "PluginUpgrade" responses: 204: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "remote" in: "query" description: | Remote reference to upgrade to. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/create: post: summary: "Create a plugin" operationId: "PluginCreate" consumes: - "application/x-tar" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
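A small sketch of the enable call described above, again assuming the default Unix socket and the `/v1.42` prefix; the plugin reference is only an example.

```go
// Sketch: enable an installed plugin via POST /plugins/{name}/enable.
// Socket path, API prefix, and plugin reference are assumptions.
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	name := "vieux/sshfs:latest" // example plugin reference; ":latest" is the default tag
	// "timeout" is the documented query parameter (HTTP client timeout in seconds, default 0).
	endpoint := "http://docker/v1.42/plugins/" + name + "/enable?timeout=0"

	resp, err := cli.Post(endpoint, "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("enable status:", resp.Status) // 200 on success, 404 if the plugin is not installed
}
```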
required: true type: "string" - name: "tarContext" in: "body" description: "Path to tar containing plugin rootfs and manifest" schema: type: "string" format: "binary" tags: ["Plugin"] /plugins/{name}/push: post: summary: "Push a plugin" operationId: "PluginPush" description: | Push a plugin to the registry. parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" responses: 200: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /plugins/{name}/set: post: summary: "Configure a plugin" operationId: "PluginSet" consumes: - "application/json" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "body" in: "body" schema: type: "array" items: type: "string" example: ["DEBUG=1"] responses: 204: description: "No error" 404: description: "Plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /nodes: get: summary: "List nodes" operationId: "NodeList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Node" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). Available filters: - `id=<node id>` - `label=<engine label>` - `membership=`(`accepted`|`pending`)` - `name=<node name>` - `node.label=<node label>` - `role=`(`manager`|`worker`)` type: "string" tags: ["Node"] /nodes/{id}: get: summary: "Inspect a node" operationId: "NodeInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Node" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true tags: ["Node"] delete: summary: "Delete a node" operationId: "NodeDelete" responses: 200: description: "no error" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true - name: "force" in: "query" description: "Force remove a node from the swarm" default: false type: "boolean" tags: ["Node"] /nodes/{id}/update: post: summary: "Update a node" operationId: "NodeUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID of 
the node" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/NodeSpec" - name: "version" in: "query" description: | The version number of the node object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Node"] /swarm: get: summary: "Inspect swarm" operationId: "SwarmInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Swarm" 404: description: "no such swarm" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/init: post: summary: "Initialize a new swarm" operationId: "SwarmInit" produces: - "application/json" - "text/plain" responses: 200: description: "no error" schema: description: "The node ID" type: "string" example: "7v2t30z9blmxuhnyo6s4cpenp" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmInitRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used. type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. if no port is set or is set to 0, default port 4789 will be used. type: "integer" format: "uint32" DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" example: ["10.10.0.0/16", "20.20.0.0/16"] ForceNewCluster: description: "Force creation of a new swarm." type: "boolean" SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. 
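To make the SwarmInitRequest fields above concrete, the sketch below builds such a body with the Go standard library. The addresses, address pool, and subnet size are illustrative values; the printed JSON is what would be POSTed to `/swarm/init`.

```go
// Sketch: construct a SwarmInitRequest body as described above. Values are illustrative.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	body := map[string]interface{}{
		// Address/port for inter-manager traffic; "eth0:4567"-style interface
		// names are also accepted per the field description above.
		"ListenAddr":    "0.0.0.0:2377",
		"AdvertiseAddr": "192.168.1.1:2377", // assumed externally reachable address
		"DataPathPort":  4789,               // 0 or omitted also means the default 4789
		"DefaultAddrPool": []string{
			"10.10.0.0/16", // subnet pool for global-scope networks
		},
		"SubnetSize":      24,
		"ForceNewCluster": false,
	}
	out, err := json.MarshalIndent(body, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // POST /swarm/init returns the new node ID as a JSON string
}
```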
type: "integer" format: "uint32" Spec: $ref: "#/definitions/SwarmSpec" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" DataPathPort: 4789 DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] SubnetSize: 24 ForceNewCluster: false Spec: Orchestration: {} Raft: {} Dispatcher: {} CAConfig: {} EncryptionConfig: AutoLockManagers: false tags: ["Swarm"] /swarm/join: post: summary: "Join an existing swarm" operationId: "SwarmJoin" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmJoinRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" RemoteAddrs: description: | Addresses of manager nodes already participating in the swarm. type: "array" items: type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" RemoteAddrs: - "node1:2377" JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" tags: ["Swarm"] /swarm/leave: post: summary: "Leave a swarm" operationId: "SwarmLeave" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "force" description: | Force leave swarm, even if this is the last manager or that it will break the cluster. in: "query" type: "boolean" default: false tags: ["Swarm"] /swarm/update: post: summary: "Update a swarm" operationId: "SwarmUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: $ref: "#/definitions/SwarmSpec" - name: "version" in: "query" description: | The version number of the swarm object being updated. This is required to avoid conflicting writes. 
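The `version` query parameter above implies a read-modify-write flow: fetch the swarm object, reuse its Spec, and quote Version.Index when updating. The sketch below illustrates that flow while asking for a worker-token rotation (the flag listed just below); the socket path and API prefix are assumptions, and reusing the fetched Spec verbatim is one simple, assumed way to supply the required body.

```go
// Sketch: read the current swarm object, then update it quoting the version index.
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	// GET /swarm yields the current Spec plus the Version.Index needed below.
	resp, err := cli.Get("http://docker/v1.42/swarm")
	if err != nil {
		panic(err)
	}
	var sw struct {
		Version struct{ Index uint64 }
		Spec    map[string]interface{}
	}
	if err := json.NewDecoder(resp.Body).Decode(&sw); err != nil {
		panic(err)
	}
	resp.Body.Close()

	// POST the Spec back, quoting the version we just read so the daemon can
	// reject conflicting writes, and rotate the worker join token.
	body, _ := json.Marshal(sw.Spec)
	u := fmt.Sprintf("http://docker/v1.42/swarm/update?version=%d&rotateWorkerToken=true", sw.Version.Index)
	resp2, err := cli.Post(u, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp2.Body.Close()
	fmt.Println("update status:", resp2.Status)
}
```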
type: "integer" format: "int64" required: true - name: "rotateWorkerToken" in: "query" description: "Rotate the worker join token." type: "boolean" default: false - name: "rotateManagerToken" in: "query" description: "Rotate the manager join token." type: "boolean" default: false - name: "rotateManagerUnlockKey" in: "query" description: "Rotate the manager unlock key." type: "boolean" default: false tags: ["Swarm"] /swarm/unlockkey: get: summary: "Get the unlock key" operationId: "SwarmUnlockkey" consumes: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "UnlockKeyResponse" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/unlock: post: summary: "Unlock a locked manager" operationId: "SwarmUnlock" consumes: - "application/json" produces: - "application/json" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /services: get: summary: "List services" operationId: "ServiceList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Service" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - `id=<service id>` - `label=<service label>` - `mode=["replicated"|"global"]` - `name=<service name>` - name: "status" in: "query" type: "boolean" description: | Include service status, with count of running and desired tasks. tags: ["Service"] /services/create: post: summary: "Create a service" operationId: "ServiceCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: type: "object" title: "ServiceCreateResponse" properties: ID: description: "The ID of the created service." 
type: "string" Warning: description: "Optional warning message" type: "string" example: ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "network is not eligible for services" schema: $ref: "#/definitions/ErrorResponse" 409: description: "name conflicts with an existing service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "web" TaskTemplate: ContainerSpec: Image: "nginx:alpine" Mounts: - ReadOnly: true Source: "web-data" Target: "/usr/share/nginx/html" Type: "volume" VolumeOptions: DriverConfig: {} Labels: com.example.something: "something-value" Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] User: "33" DNSConfig: Nameservers: ["8.8.8.8"] Search: ["example.org"] Options: ["timeout:3"] Secrets: - File: Name: "www.example.org.key" UID: "33" GID: "33" Mode: 384 SecretID: "fpjqlhnwb19zds35k8wn80lq9" SecretName: "example_org_domain_key" LogDriver: Name: "json-file" Options: max-file: "3" max-size: "10M" Placement: {} Resources: Limits: MemoryBytes: 104857600 Reservations: {} RestartPolicy: Condition: "on-failure" Delay: 10000000000 MaxAttempts: 10 Mode: Replicated: Replicas: 4 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Ports: - Protocol: "tcp" PublishedPort: 8080 TargetPort: 80 Labels: foo: "bar" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}: get: summary: "Inspect a service" operationId: "ServiceInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Service" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "insertDefaults" in: "query" description: "Fill empty fields with default values." type: "boolean" default: false tags: ["Service"] delete: summary: "Delete a service" operationId: "ServiceDelete" responses: 200: description: "no error" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." 
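A trimmed-down version of the service example above, built programmatically; POSTing the printed JSON to `/services/create` should answer 201 with the new service ID. The name, image, port, and replica count are placeholders.

```go
// Sketch: a minimal ServiceSpec body for POST /services/create.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	spec := map[string]interface{}{
		"Name": "web",
		"TaskTemplate": map[string]interface{}{
			"ContainerSpec": map[string]interface{}{
				"Image": "nginx:alpine",
			},
			"RestartPolicy": map[string]interface{}{
				"Condition": "on-failure",
				"Delay":     10000000000, // nanoseconds (10s), as in the example above
			},
		},
		"Mode": map[string]interface{}{
			"Replicated": map[string]interface{}{"Replicas": 2},
		},
		"EndpointSpec": map[string]interface{}{
			"Ports": []map[string]interface{}{
				{"Protocol": "tcp", "PublishedPort": 8080, "TargetPort": 80},
			},
		},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```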
required: true type: "string" tags: ["Service"] /services/{id}/update: post: summary: "Update a service" operationId: "ServiceUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/ServiceUpdateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "top" TaskTemplate: ContainerSpec: Image: "busybox" Args: - "top" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" - name: "version" in: "query" description: | The version number of the service object being updated. This is required to avoid conflicting writes. This version number should be the value as currently set on the service *before* the update. You can find the current version by calling `GET /services/{id}` required: true type: "integer" - name: "registryAuthFrom" in: "query" description: | If the `X-Registry-Auth` header is not specified, this parameter indicates where to find registry authorization credentials. type: "string" enum: ["spec", "previous-spec"] default: "spec" - name: "rollback" in: "query" description: | Set to this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case. type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}/logs: get: summary: "Get service logs" description: | Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "ServiceLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such service: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the service" type: "string" - name: "details" in: "query" description: "Show service context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Service"] /tasks: get: summary: "List tasks" operationId: "TaskList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Task" example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" - ID: "1yljwbmlr8er2waf8orvqpwms" Version: Index: 30 CreatedAt: "2016-06-07T21:07:30.019104782Z" UpdatedAt: "2016-06-07T21:07:30.231958098Z" Name: "hopeful_cori" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:30.202183143Z" State: "shutdown" Message: "shutdown" ContainerStatus: ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" DesiredState: "shutdown" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.5/16" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
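The log-related query parameters above compose like any other URL query. A sketch, assuming a service named `web` and the default Unix socket:

```go
// Sketch: stream a service's logs using the documented query parameters.
package main

import (
	"context"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	q := url.Values{}
	q.Set("stdout", "true")
	q.Set("stderr", "true")
	q.Set("follow", "true") // keep the connection open as new lines arrive
	q.Set("tail", "50")     // or "all" (the default) for the full history

	resp, err := cli.Get("http://docker/v1.42/services/web/logs?" + q.Encode()) // "web" is a hypothetical service name
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The body is a raw log stream (see the container logs endpoint referenced above).
	io.Copy(os.Stdout, resp.Body)
}
```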
Available filters: - `desired-state=(running | shutdown | accepted)` - `id=<task id>` - `label=key` or `label="key=value"` - `name=<task name>` - `node=<node id or name>` - `service=<service name>` tags: ["Task"] /tasks/{id}: get: summary: "Inspect a task" operationId: "TaskInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Task" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID of the task" required: true type: "string" tags: ["Task"] /tasks/{id}/logs: get: summary: "Get task logs" description: | Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such task: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID of the task" type: "string" - name: "details" in: "query" description: "Show task context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Task"] /secrets: get: summary: "List secrets" operationId: "SecretList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Secret" example: - ID: "blt1owaxmitz71s9v5zh81zun" Version: Index: 85 CreatedAt: "2017-07-20T13:55:28.678958722Z" UpdatedAt: "2017-07-20T13:55:28.678958722Z" Spec: Name: "mysql-passwd" Labels: some.label: "some.value" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. 
Available filters: - `id=<secret id>` - `label=<key> or label=<key>=value` - `name=<secret name>` - `names=<secret name>` tags: ["Secret"] /secrets/create: post: summary: "Create a secret" operationId: "SecretCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/SecretSpec" - type: "object" example: Name: "app-key.crt" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" tags: ["Secret"] /secrets/{id}: get: summary: "Inspect a secret" operationId: "SecretInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Secret" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] delete: summary: "Delete a secret" operationId: "SecretDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] /secrets/{id}/update: post: summary: "Update a Secret" operationId: "SecretUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such secret" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the secret" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/SecretSpec" description: | The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values. - name: "version" in: "query" description: | The version number of the secret object being updated. This is required to avoid conflicting writes. 
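SecretSpec.Data carries the secret payload as standard base64; the example value shown above decodes to "THIS IS NOT A REAL CERTIFICATE". A sketch of building such a create body, with a placeholder name and payload:

```go
// Sketch: build a SecretSpec body for POST /secrets/create, base64-encoding the payload.
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	payload := []byte("THIS IS NOT A REAL CERTIFICATE\n") // placeholder secret material

	body := map[string]interface{}{
		"Name":   "app-key.crt",
		"Labels": map[string]string{"foo": "bar"},
		"Data":   base64.StdEncoding.EncodeToString(payload),
	}
	out, _ := json.MarshalIndent(body, "", "  ")
	// POSTing this to /secrets/create returns 201 with the new secret's ID,
	// or 409 if the name is already taken.
	fmt.Println(string(out))
}
```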
type: "integer" format: "int64" required: true tags: ["Secret"] /configs: get: summary: "List configs" operationId: "ConfigList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Config" example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "server.conf" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: - `id=<config id>` - `label=<key> or label=<key>=value` - `name=<config name>` - `names=<config name>` tags: ["Config"] /configs/create: post: summary: "Create a config" operationId: "ConfigCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/ConfigSpec" - type: "object" example: Name: "server.conf" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" tags: ["Config"] /configs/{id}: get: summary: "Inspect a config" operationId: "ConfigInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Config" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] delete: summary: "Delete a config" operationId: "ConfigDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] /configs/{id}/update: post: summary: "Update a Config" operationId: "ConfigUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such config" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the config" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/ConfigSpec" description: | The spec of the config to update. 
              Currently, only the Labels field can be updated. All other fields
              must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect)
              response values.
        - name: "version"
          in: "query"
          description: |
            The version number of the config object being updated. This is
            required to avoid conflicting writes.
          type: "integer"
          format: "int64"
          required: true
      tags: ["Config"]
  /distribution/{name}/json:
    get:
      summary: "Get image information from the registry"
      description: |
        Return image digest and platform information by contacting the registry.
      operationId: "DistributionInspect"
      produces:
        - "application/json"
      responses:
        200:
          description: "descriptor and platform information"
          schema:
            type: "object"
            x-go-name: DistributionInspect
            title: "DistributionInspectResponse"
            required: [Descriptor, Platforms]
            properties:
              Descriptor:
                type: "object"
                description: |
                  A descriptor struct containing digest, media type, and size.
                properties:
                  mediaType:
                    type: "string"
                  size:
                    type: "integer"
                    format: "int64"
                  digest:
                    type: "string"
                  urls:
                    type: "array"
                    items:
                      type: "string"
              Platforms:
                type: "array"
                description: |
                  An array containing all platforms supported by the image.
                items:
                  type: "object"
                  properties:
                    architecture:
                      type: "string"
                    os:
                      type: "string"
                    os.version:
                      type: "string"
                    os.features:
                      type: "array"
                      items:
                        type: "string"
                    variant:
                      type: "string"
                    Features:
                      type: "array"
                      items:
                        type: "string"
          examples:
            application/json:
              Descriptor:
                MediaType: "application/vnd.docker.distribution.manifest.v2+json"
                Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
                Size: 3987495
                URLs:
                  - ""
              Platforms:
                - architecture: "amd64"
                  os: "linux"
                  os.version: ""
                  os.features:
                    - ""
                  variant: ""
                  Features:
                    - ""
        401:
          description: "Failed authentication or no image found"
          schema:
            $ref: "#/definitions/ErrorResponse"
          examples:
            application/json:
              message: "No such image: someimage (tag: latest)"
        500:
          description: "Server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "name"
          in: "path"
          description: "Image name or id"
          type: "string"
          required: true
      tags: ["Distribution"]
  /session:
    post:
      summary: "Initialize interactive session"
      description: |
        Start a new interactive session with a server. Session allows the server
        to call back to the client for advanced capabilities.

        ### Hijacking

        This endpoint hijacks the HTTP connection to the HTTP/2 transport, which
        allows the client to expose gRPC services on that connection.

        For example, the client sends this request to upgrade the connection:

        ```
        POST /session HTTP/1.1
        Upgrade: h2c
        Connection: Upgrade
        ```

        The Docker daemon responds with a `101 UPGRADED` response followed by
        the raw stream:

        ```
        HTTP/1.1 101 UPGRADED
        Connection: Upgrade
        Upgrade: h2c
        ```
      operationId: "Session"
      produces:
        - "application/vnd.docker.raw-stream"
      responses:
        101:
          description: "no error, hijacking successful"
        400:
          description: "bad parameter"
          schema:
            $ref: "#/definitions/ErrorResponse"
        500:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
      tags: ["Session"]
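A sketch of calling the distribution endpoint above to read an image's manifest digest and platform list without pulling it. The image name, socket path, and API prefix are assumptions; a registry that requires login would also need the registry credentials described under Authentication.

```go
// Sketch: GET /distribution/{name}/json and print the digest and platforms.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	resp, err := cli.Get("http://docker/v1.42/distribution/nginx:alpine/json") // example image reference
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var info struct {
		Descriptor struct {
			MediaType string
			Digest    string
			Size      int64
		}
		Platforms []struct {
			Architecture string
			OS           string
		}
	}
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		panic(err)
	}
	fmt.Println("digest:", info.Descriptor.Digest)
	for _, p := range info.Platforms {
		fmt.Printf("platform: %s/%s\n", p.OS, p.Architecture)
	}
}
```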
# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. # # This is used for generating API documentation and the types used by the # client/server. See api/README.md for more information. # # Some style notes: # - This file is used by ReDoc, which allows GitHub Flavored Markdown in # descriptions. # - There is no maximum line length, for ease of editing and pretty diffs. # - operationIds are in the format "NounVerb", with a singular noun. swagger: "2.0" schemes: - "http" - "https" produces: - "application/json" - "text/plain" consumes: - "application/json" - "text/plain" basePath: "/v1.42" info: title: "Docker Engine API" version: "1.42" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { "message": "page not found" } ``` # Versioning The API is usually changed in each release, so API calls are versioned to ensure that clients don't break. To lock to a specific version of the API, you prefix the URL with its version, for example, call `/v1.30/info` to use the v1.30 version of the `/info` endpoint. If the API version specified in the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. If you omit the version-prefix, the current version of the API (v1.42) is used. For example, calling `/info` is the same as calling `/v1.42/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer daemons. # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) (JSON) string with the following structure: ``` { "username": "string", "password": "string", "email": "string", "serveraddress": "string" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { "identitytoken": "9cbaf023786cd7..." } ``` # The tags on paths define the menu sections in the ReDoc documentation, so # the usage of tags must make sense for that: # - They should be singular, not plural. # - There should not be too many tags, or the menu becomes unwieldy. 
For # example, it is preferable to add a path to the "System" tag instead of # creating a tag with a single path in it. # - The order of tags in this list defines the order in the menu. tags: # Primary objects - name: "Container" x-displayName: "Containers" description: | Create and manage containers. - name: "Image" x-displayName: "Images" - name: "Network" x-displayName: "Networks" description: | Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/network/) for more information. - name: "Volume" x-displayName: "Volumes" description: | Create and manage persistent storage that can be attached to containers. - name: "Exec" x-displayName: "Exec" description: | Run new commands inside running containers. Refer to the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. # Swarm things - name: "Swarm" x-displayName: "Swarm" description: | Engines can be clustered together in a swarm. Refer to the [swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - name: "Node" x-displayName: "Nodes" description: | Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Service" x-displayName: "Services" description: | Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Task" x-displayName: "Tasks" description: | A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - name: "Secret" x-displayName: "Secrets" description: | Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - name: "Config" x-displayName: "Configs" description: | Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. 
# System things - name: "Plugin" x-displayName: "Plugins" - name: "System" x-displayName: "System" definitions: Port: type: "object" description: "An open port on a container" required: [PrivatePort, Type] properties: IP: type: "string" format: "ip-address" description: "Host IP address that the container's port is mapped to" PrivatePort: type: "integer" format: "uint16" x-nullable: false description: "Port on the container" PublicPort: type: "integer" format: "uint16" description: "Port exposed on the host" Type: type: "string" x-nullable: false enum: ["tcp", "udp", "sctp"] example: PrivatePort: 8080 PublicPort: 80 Type: "tcp" MountPoint: type: "object" description: "A mount point inside a container" properties: Type: type: "string" Name: type: "string" Source: type: "string" Destination: type: "string" Driver: type: "string" Mode: type: "string" RW: type: "boolean" Propagation: type: "string" DeviceMapping: type: "object" description: "A device mapping between the host and container" properties: PathOnHost: type: "string" PathInContainer: type: "string" CgroupPermissions: type: "string" example: PathOnHost: "/dev/deviceName" PathInContainer: "/dev/deviceName" CgroupPermissions: "mrw" DeviceRequest: type: "object" description: "A request for devices to be sent to device drivers" properties: Driver: type: "string" example: "nvidia" Count: type: "integer" example: -1 DeviceIDs: type: "array" items: type: "string" example: - "0" - "1" - "GPU-fef8089b-4820-abfc-e83e-94318197576e" Capabilities: description: | A list of capabilities; an OR list of AND lists of capabilities. type: "array" items: type: "array" items: type: "string" example: # gpu AND nvidia AND compute - ["gpu", "nvidia", "compute"] Options: description: | Driver-specific options, specified as a key/value pairs. These options are passed directly to the driver. type: "object" additionalProperties: type: "string" ThrottleDevice: type: "object" properties: Path: description: "Device path" type: "string" Rate: description: "Rate" type: "integer" format: "int64" minimum: 0 Mount: type: "object" properties: Target: description: "Container path." type: "string" Source: description: "Mount source (e.g. a volume name, a host path)." type: "string" Type: description: | The mount type. Available types: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. type: "string" enum: - "bind" - "volume" - "tmpfs" - "npipe" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" Consistency: description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." type: "string" BindOptions: description: "Optional configuration for the `bind` type." type: "object" properties: Propagation: description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." type: "string" enum: - "private" - "rprivate" - "shared" - "rshared" - "slave" - "rslave" NonRecursive: description: "Disable recursive bind mount." type: "boolean" default: false VolumeOptions: description: "Optional configuration for the `volume` type." 
type: "object" properties: NoCopy: description: "Populate volume with data from the target." type: "boolean" default: false Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" DriverConfig: description: "Map of driver specific options" type: "object" properties: Name: description: "Name of the driver to use to create the volume." type: "string" Options: description: "key/value map of driver specific options." type: "object" additionalProperties: type: "string" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" properties: SizeBytes: description: "The size for the tmpfs mount in bytes." type: "integer" format: "int64" Mode: description: "The permission mode for the tmpfs mount in an integer." type: "integer" RestartPolicy: description: | The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. type: "object" properties: Name: type: "string" description: | - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" - "no" - "always" - "unless-stopped" - "on-failure" MaximumRetryCount: type: "integer" description: | If `on-failure` is used, the number of times to retry before giving up. Resources: description: "A container's resources (cgroups config, ulimits, etc)" type: "object" properties: # Applicable to all platforms CpuShares: description: | An integer value representing this container's relative CPU weight versus other containers. type: "integer" Memory: description: "Memory limit in bytes." type: "integer" format: "int64" default: 0 # Applicable to UNIX platforms CgroupParent: description: | Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. type: "string" BlkioWeight: description: "Block IO weight (relative weight)." type: "integer" minimum: 0 maximum: 1000 BlkioWeightDevice: description: | Block IO weight (relative device weight) in the form: ``` [{"Path": "device_path", "Weight": weight}] ``` type: "array" items: type: "object" properties: Path: type: "string" Weight: type: "integer" minimum: 0 BlkioDeviceReadBps: description: | Limit read rate (bytes per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteBps: description: | Limit write rate (bytes per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceReadIOps: description: | Limit read rate (IO per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteIOps: description: | Limit write rate (IO per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" CpuPeriod: description: "The length of a CPU period in microseconds." 
type: "integer" format: "int64" CpuQuota: description: | Microseconds of CPU time that the container can get in a CPU period. type: "integer" format: "int64" CpuRealtimePeriod: description: | The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpuRealtimeRuntime: description: | The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpusetCpus: description: | CPUs in which to allow execution (e.g., `0-3`, `0,1`). type: "string" example: "0-3" CpusetMems: description: | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. type: "string" Devices: description: "A list of devices to add to the container." type: "array" items: $ref: "#/definitions/DeviceMapping" DeviceCgroupRules: description: "a list of cgroup rules to apply to the container" type: "array" items: type: "string" example: "c 13:* rwm" DeviceRequests: description: | A list of requests for devices to be sent to device drivers. type: "array" items: $ref: "#/definitions/DeviceRequest" KernelMemory: description: | Kernel memory limit in bytes. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "integer" format: "int64" example: 209715200 KernelMemoryTCP: description: "Hard limit for kernel TCP buffer memory (in bytes)." type: "integer" format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" format: "int64" MemorySwap: description: | Total memory limit (memory + swap). Set as `-1` to enable unlimited swap. type: "integer" format: "int64" MemorySwappiness: description: | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. type: "integer" format: "int64" minimum: 0 maximum: 100 NanoCpus: description: "CPU quota in units of 10<sup>-9</sup> CPUs." type: "integer" format: "int64" OomKillDisable: description: "Disable OOM Killer for the container." type: "boolean" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true PidsLimit: description: | Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. type: "integer" format: "int64" x-nullable: true Ulimits: description: | A list of resource limits to set in the container. For example: ``` {"Name": "nofile", "Soft": 1024, "Hard": 2048} ``` type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" # Applicable to Windows CpuCount: description: | The number of usable CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" CpuPercent: description: | The usable percentage of the available CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. 
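The CPU fields above express the same limit in different units: 1.5 CPUs is a NanoCpus value of 1500000000, or equivalently a CpuQuota of 150000 against a CpuPeriod of 100000 microseconds. A sketch with illustrative memory figures:

```go
// Sketch: two equivalent ways to cap a container at 1.5 CPUs using the fields defined above.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	const cpus = 1.5

	res := map[string]interface{}{
		"NanoCpus": int64(cpus * 1e9), // 1500000000 == 1.5 CPUs (units of 1e-9 CPUs)

		// Equivalent expression of the same limit: 150000µs of CPU time per
		// 100000µs period. Use one style or the other, not both.
		// "CpuPeriod": 100000,
		// "CpuQuota":  150000,

		"Memory":     256 * 1024 * 1024, // hard limit in bytes (illustrative)
		"MemorySwap": -1,                // -1 enables unlimited swap, per the field description
	}
	out, _ := json.MarshalIndent(res, "", "  ")
	fmt.Println(string(out))
}
```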
type: "integer" format: "int64" IOMaximumIOps: description: "Maximum IOps for the container system drive (Windows only)" type: "integer" format: "int64" IOMaximumBandwidth: description: | Maximum IO in bytes per second for the container system drive (Windows only). type: "integer" format: "int64" Limit: description: | An object describing a limit on resources which can be requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 Pids: description: | Limits the maximum number of PIDs in the container. Set `0` for unlimited. type: "integer" format: "int64" default: 0 example: 100 ResourceObject: description: | An object describing the resources which can be advertised by a node and requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 GenericResources: $ref: "#/definitions/GenericResources" GenericResources: description: | User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`). type: "array" items: type: "object" properties: NamedResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "string" DiscreteResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "integer" format: "int64" example: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" HealthConfig: description: "A test to perform to check that the container is healthy." type: "object" properties: Test: description: | The test to perform. Possible values are: - `[]` inherit healthcheck from image or parent image - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell type: "array" items: type: "string" Interval: description: | The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Retries: description: | The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. type: "integer" StartPeriod: description: | Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Health: description: | Health stores information about the container's healthcheck results. 
type: "object" properties: Status: description: | Status is one of `none`, `starting`, `healthy` or `unhealthy` - "none" Indicates there is no healthcheck - "starting" Starting indicates that the container is not yet ready - "healthy" Healthy indicates that the container is running correctly - "unhealthy" Unhealthy indicates that the container has a problem type: "string" enum: - "none" - "starting" - "healthy" - "unhealthy" example: "healthy" FailingStreak: description: "FailingStreak is the number of consecutive failures" type: "integer" example: 0 Log: type: "array" description: | Log contains the last few results (oldest first) items: x-nullable: true $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" properties: Start: description: | Date and time at which this check started in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "date-time" example: "2020-01-04T10:44:24.496525531Z" End: description: | Date and time at which this check ended in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2020-01-04T10:45:21.364524523Z" ExitCode: description: | ExitCode meanings: - `0` healthy - `1` unhealthy - `2` reserved (considered unhealthy) - other values: error running probe type: "integer" example: 0 Output: description: "Output from last check" type: "string" HostConfig: description: "Container configuration that depends on the host we are running on" allOf: - $ref: "#/definitions/Resources" - type: "object" properties: # Applicable to all platforms Binds: type: "array" description: | A list of volume bindings for this container. Each volume binding is a string in one of these forms: - `host-src:container-dest[:options]` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest[:options]` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. `options` is an optional, comma-delimited list of: - `nocopy` disables automatic copying of data from the container path to the volume. The `nocopy` flag only applies to named volumes. - `[ro|rw]` mounts a volume read-only or read-write, respectively. If omitted or set to `rw`, volumes are mounted read-write. - `[z|Z]` applies SELinux labels to allow or deny multiple containers to read and write to the same volume. - `z`: a _shared_ content label is applied to the content. This label indicates that multiple containers can share the volume content, for both reading and writing. - `Z`: a _private unshared_ label is applied to the content. This label indicates that only the current container can use a private volume. Labeling systems such as SELinux require proper labels to be placed on volume content that is mounted into a container. Without a label, the security system can prevent a container's processes from using the content. By default, the labels set by the host operating system are not modified. - `[[r]shared|[r]slave|[r]private]` specifies mount [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). This only applies to bind-mounted volumes, not internal volumes or named volumes. 
Mount propagation requires the source mount point (the location where the source directory is mounted in the host operating system) to have the correct propagation properties. For shared volumes, the source mount point must be set to `shared`. For slave volumes, the mount must be set to either `shared` or `slave`. items: type: "string" ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: type: "string" enum: - "json-file" - "syslog" - "journald" - "gelf" - "fluentd" - "awslogs" - "splunk" - "etwlogs" - "none" Config: type: "object" additionalProperties: type: "string" NetworkMode: type: "string" description: | Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name to which this container should connect to. PortBindings: $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: type: "boolean" description: | Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set. VolumeDriver: type: "string" description: "Driver that this container uses to mount volumes." VolumesFrom: type: "array" description: | A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`. items: type: "string" Mounts: description: | Specification for mounts to be added to the container. type: "array" items: $ref: "#/definitions/Mount" # Applicable to UNIX platforms CapAdd: type: "array" description: | A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'. items: type: "string" CapDrop: type: "array" description: | A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'. items: type: "string" CgroupnsMode: type: "string" enum: - "private" - "host" description: | cgroup namespace mode for the container. Possible values are: - `"private"`: the container runs in its own private cgroup namespace - `"host"`: use the host system's cgroup namespace If not specified, the daemon default is used, which can either be `"private"` or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." items: type: "string" DnsOptions: type: "array" description: "A list of DNS options." items: type: "string" DnsSearch: type: "array" description: "A list of DNS search domains." items: type: "string" ExtraHosts: type: "array" description: | A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. items: type: "string" GroupAdd: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" IpcMode: type: "string" description: | IPC sharing mode for the container. Possible values are: - `"none"`: own private IPC namespace, with /dev/shm not mounted - `"private"`: own private IPC namespace - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - `"container:<name|id>"`: join another (shareable) container's IPC namespace - `"host"`: use the host system's IPC namespace If not specified, daemon default is used, which can either be `"private"` or `"shareable"`, depending on daemon version and configuration. 
Cgroup: type: "string" description: "Cgroup to use for the container." Links: type: "array" description: | A list of links for the container in the form `container_name:alias`. items: type: "string" OomScoreAdj: type: "integer" description: | An integer value containing the score given to the container in order to tune OOM killer preferences. example: 500 PidMode: type: "string" description: | Set the PID (Process) Namespace mode for the container. It can be either: - `"container:<name|id>"`: joins another container's PID namespace - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" description: "Gives the container full access to the host." PublishAllPorts: type: "boolean" description: | Allocates an ephemeral host port for all of a container's exposed ports. Ports are de-allocated when the container stops and allocated when the container starts. The allocated port might be changed when restarting the container. The port is selected from the ephemeral port range that depends on the kernel. For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" description: "A list of string values to customize labels for MLS systems, such as SELinux." items: type: "string" StorageOpt: type: "object" description: | Storage driver options for this container, in the form `{"size": "120G"}`. additionalProperties: type: "string" Tmpfs: type: "object" description: | A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: ``` { "/run": "rw,noexec,nosuid,size=65536k" } ``` additionalProperties: type: "string" UTSMode: type: "string" description: "UTS namespace to use for the container." UsernsMode: type: "string" description: | Sets the usernamespace mode for the container when usernamespace remapping option is enabled. ShmSize: type: "integer" description: | Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. minimum: 0 Sysctls: type: "object" description: | A list of kernel parameters (sysctls) to set in the container. For example: ``` {"net.ipv4.ip_forward": "1"} ``` additionalProperties: type: "string" Runtime: type: "string" description: "Runtime to use with this container." # Applicable to Windows ConsoleSize: type: "array" description: | Initial console size, as an `[height, width]` array. (Windows only) minItems: 2 maxItems: 2 items: type: "integer" minimum: 0 Isolation: type: "string" description: | Isolation technology of the container. (Windows only) enum: - "default" - "process" - "hyperv" MaskedPaths: type: "array" description: | The list of paths to be masked inside the container (this overrides the default set of paths). items: type: "string" ReadonlyPaths: type: "array" description: | The list of paths to be set as read-only inside the container (this overrides the default set of paths). items: type: "string" ContainerConfig: description: "Configuration for a container that is portable between hosts" type: "object" properties: Hostname: description: "The hostname to use for the container, as a valid RFC 1123 hostname." type: "string" Domainname: description: "The domain name to use for the container." type: "string" User: description: "The user that commands are run as inside the container." type: "string" AttachStdin: description: "Whether to attach to `stdin`." 
type: "boolean" default: false AttachStdout: description: "Whether to attach to `stdout`." type: "boolean" default: true AttachStderr: description: "Whether to attach to `stderr`." type: "boolean" default: true ExposedPorts: description: | An object mapping ports to an empty object in the form: `{"<port>/<tcp|udp|sctp>": {}}` type: "object" additionalProperties: type: "object" enum: - {} default: {} Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. type: "boolean" default: false OpenStdin: description: "Open `stdin`" type: "boolean" default: false StdinOnce: description: "Close `stdin` after one attached client disconnects" type: "boolean" default: false Env: description: | A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. type: "array" items: type: "string" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" Image: description: | The name of the image to use when creating the container/ type: "string" Volumes: description: | An object mapping mount point paths inside the container to empty objects. type: "object" additionalProperties: type: "object" enum: - {} default: {} WorkingDir: description: "The working directory for commands to run in." type: "string" Entrypoint: description: | The entry point for the container as a string or an array of strings. If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). type: "array" items: type: "string" NetworkDisabled: description: "Disable networking for the container." type: "boolean" MacAddress: description: "MAC address of the container." type: "string" OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" items: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" default: "SIGTERM" StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" items: type: "string" NetworkingConfig: description: | NetworkingConfig represents the container's networking configuration for each of its interfaces. It is used for the networking configs specified in the `docker create` and `docker network connect` commands. type: "object" properties: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from # /definitions/EndpointSettings, because containers/create currently # does not support attaching to multiple networks, so the example request # would be confusing if it showed that multiple networks can be contained # in the EndpointsConfig. 
# TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: description: Name of the network's bridge (for example, `docker0`). type: "string" example: "docker0" SandboxID: description: SandboxID uniquely represents a container's network stack. type: "string" example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. type: "boolean" example: false LinkLocalIPv6Address: description: IPv6 unicast address using the link-local prefix. type: "string" example: "fe80::42:acff:fe11:1" LinkLocalIPv6PrefixLen: description: Prefix length of the IPv6 unicast address. type: "integer" example: "64" Ports: $ref: "#/definitions/PortMap" SandboxKey: description: SandboxKey identifies the sandbox type: "string" example: "/var/run/docker/netns/8ab54b426c38" # TODO is SecondaryIPAddresses actually used? SecondaryIPAddresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO is SecondaryIPv6Addresses actually used? SecondaryIPv6Addresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO properties below are part of DefaultNetworkSettings, which is # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 EndpointID: description: | EndpointID uniquely represents a service endpoint in a Sandbox. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.1" GlobalIPv6Address: description: | Global IPv6 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. 
This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 64 IPAddress: description: | IPv4 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address for this network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8:2::100" MacAddress: description: | MAC address for the container on the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "02:42:ac:11:00:04" Networks: description: | Information about all networks that the container is connected to. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Address: description: Address represents an IPv4 or IPv6 IP address. type: "object" properties: Addr: description: IP address. type: "string" PrefixLen: description: Mask length of the IP address. type: "integer" PortMap: description: | PortMap describes the mapping of container ports to host ports, using the container's port-number and protocol as key in the format `<port>/<protocol>`, for example, `80/udp`. If a container's port is mapped for multiple protocols, separate entries are added to the mapping table. type: "object" additionalProperties: type: "array" x-nullable: true items: $ref: "#/definitions/PortBinding" example: "443/tcp": - HostIp: "127.0.0.1" HostPort: "4443" "80/tcp": - HostIp: "0.0.0.0" HostPort: "80" - HostIp: "0.0.0.0" HostPort: "8080" "80/udp": - HostIp: "0.0.0.0" HostPort: "80" "53/udp": - HostIp: "0.0.0.0" HostPort: "53" "2377/tcp": null PortBinding: description: | PortBinding represents a binding between a host IP address and a host port. type: "object" properties: HostIp: description: "Host IP address that the container's port is mapped to." type: "string" example: "127.0.0.1" HostPort: description: "Host port number that the container's port is mapped to." type: "string" example: "4443" GraphDriverData: description: "Information about a container's graph driver." 
type: "object" required: [Name, Data] properties: Name: type: "string" x-nullable: false Data: type: "object" x-nullable: false additionalProperties: type: "string" Image: type: "object" required: - Id - Parent - Comment - Created - Container - DockerVersion - Author - Architecture - Os - Size - VirtualSize - GraphDriver - RootFS properties: Id: type: "string" x-nullable: false RepoTags: type: "array" items: type: "string" RepoDigests: type: "array" items: type: "string" Parent: type: "string" x-nullable: false Comment: type: "string" x-nullable: false Created: type: "string" x-nullable: false Container: type: "string" x-nullable: false ContainerConfig: $ref: "#/definitions/ContainerConfig" DockerVersion: type: "string" x-nullable: false Author: type: "string" x-nullable: false Config: $ref: "#/definitions/ContainerConfig" Architecture: type: "string" x-nullable: false Os: type: "string" x-nullable: false OsVersion: type: "string" Size: type: "integer" format: "int64" x-nullable: false VirtualSize: type: "integer" format: "int64" x-nullable: false GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: type: "object" required: [Type] properties: Type: type: "string" x-nullable: false Layers: type: "array" items: type: "string" BaseLayer: type: "string" Metadata: type: "object" properties: LastTagTime: type: "string" format: "dateTime" ImageSummary: type: "object" required: - Id - ParentId - RepoTags - RepoDigests - Created - Size - SharedSize - VirtualSize - Labels - Containers properties: Id: type: "string" x-nullable: false ParentId: type: "string" x-nullable: false RepoTags: type: "array" x-nullable: false items: type: "string" RepoDigests: type: "array" x-nullable: false items: type: "string" Created: type: "integer" x-nullable: false Size: type: "integer" x-nullable: false SharedSize: type: "integer" x-nullable: false VirtualSize: type: "integer" x-nullable: false Labels: type: "object" x-nullable: false additionalProperties: type: "string" Containers: x-nullable: false type: "integer" AuthConfig: type: "object" properties: username: type: "string" password: type: "string" email: type: "string" serveraddress: type: "string" example: username: "hannibal" password: "xxxx" serveraddress: "https://index.docker.io/v1/" ProcessConfig: type: "object" properties: privileged: type: "boolean" user: type: "string" tty: type: "boolean" entrypoint: type: "string" arguments: type: "array" items: type: "string" Volume: type: "object" required: [Name, Driver, Mountpoint, Labels, Scope, Options] properties: Name: type: "string" description: "Name of the volume." x-nullable: false Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false Mountpoint: type: "string" description: "Mount path of the volume on the host." x-nullable: false CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." Status: type: "object" description: | Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature. additionalProperties: type: "object" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" Scope: type: "string" description: | The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. 
default: "local" x-nullable: false enum: ["local", "global"] Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" UsageData: type: "object" x-nullable: true required: [Size, RefCount] description: | Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. properties: Size: type: "integer" default: -1 description: | Amount of disk space used by the volume (in bytes). This information is only available for volumes created with the `"local"` volume driver. For volumes created with other volume drivers, this field is set to `-1` ("not available") x-nullable: false RefCount: type: "integer" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. x-nullable: false example: Name: "tardis" Driver: "custom" Mountpoint: "/var/lib/docker/volumes/tardis" Status: hello: "world" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" CreatedAt: "2016-06-07T20:31:11.853781916Z" Network: type: "object" properties: Name: type: "string" Id: type: "string" Created: type: "string" format: "dateTime" Scope: type: "string" Driver: type: "string" EnableIPv6: type: "boolean" IPAM: $ref: "#/definitions/IPAM" Internal: type: "boolean" Attachable: type: "boolean" Ingress: type: "boolean" Containers: type: "object" additionalProperties: $ref: "#/definitions/NetworkContainer" Options: type: "object" additionalProperties: type: "string" Labels: type: "object" additionalProperties: type: "string" example: Name: "net01" Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" Created: "2016-10-19T04:33:30.360899459Z" Scope: "local" Driver: "bridge" EnableIPv6: false IPAM: Driver: "default" Config: - Subnet: "172.19.0.0/16" Gateway: "172.19.0.1" Options: foo: "bar" Internal: false Attachable: false Ingress: false Containers: 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: Name: "test" EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: "02:42:ac:13:00:02" IPv4Address: "172.19.0.2/16" IPv6Address: "" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" IPAM: type: "object" properties: Driver: description: "Name of the IPAM driver to use." type: "string" default: "default" Config: description: | List of IPAM configuration options, specified as a map: ``` {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} ``` type: "array" items: type: "object" additionalProperties: type: "string" Options: description: "Driver-specific options, specified as a map." 
type: "object" additionalProperties: type: "string" NetworkContainer: type: "object" properties: Name: type: "string" EndpointID: type: "string" MacAddress: type: "string" IPv4Address: type: "string" IPv6Address: type: "string" BuildInfo: type: "object" properties: id: type: "string" stream: type: "string" error: type: "string" errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" aux: $ref: "#/definitions/ImageID" BuildCache: type: "object" properties: ID: type: "string" Parent: type: "string" Type: type: "string" Description: type: "string" InUse: type: "boolean" Shared: type: "boolean" Size: description: | Amount of disk space used by the build cache (in bytes). type: "integer" CreatedAt: description: | Date and time at which the build cache was created in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" LastUsedAt: description: | Date and time at which the build cache was last used in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" x-nullable: true example: "2017-08-09T07:09:37.632105588Z" UsageCount: type: "integer" ImageID: type: "object" description: "Image ID or Digest" properties: ID: type: "string" example: ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" CreateImageInfo: type: "object" properties: id: type: "string" error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" PushImageInfo: type: "object" properties: error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" ErrorDetail: type: "object" properties: code: type: "integer" message: type: "string" ProgressDetail: type: "object" properties: current: type: "integer" total: type: "integer" ErrorResponse: description: "Represents an error." type: "object" required: ["message"] properties: message: description: "The error message." type: "string" x-nullable: false example: message: "Something went wrong." IdResponse: description: "Response to an API call that returns just an Id" type: "object" required: ["Id"] properties: Id: description: "The id of the newly created object." type: "string" x-nullable: false EndpointSettings: description: "Configuration for a network endpoint." type: "object" properties: # Configurations IPAMConfig: $ref: "#/definitions/EndpointIPAMConfig" Links: type: "array" items: type: "string" example: - "container_1" - "container_2" Aliases: type: "array" items: type: "string" example: - "server_x" - "server_y" # Operational data NetworkID: description: | Unique ID of the network. type: "string" example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" EndpointID: description: | Unique ID for the service endpoint in a Sandbox. type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for this network. type: "string" example: "172.17.0.1" IPAddress: description: | IPv4 address. type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address. type: "string" example: "2001:db8:2::100" GlobalIPv6Address: description: | Global IPv6 address. 
type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. type: "integer" format: "int64" example: 64 MacAddress: description: | MAC address for the endpoint on this network. type: "string" example: "02:42:ac:11:00:04" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" x-nullable: true additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" EndpointIPAMConfig: description: | EndpointIPAMConfig represents an endpoint's IPAM configuration. type: "object" x-nullable: true properties: IPv4Address: type: "string" example: "172.20.30.33" IPv6Address: type: "string" example: "2001:db8:abcd::3033" LinkLocalIPs: type: "array" items: type: "string" example: - "169.254.34.68" - "fe80::3468" PluginMount: type: "object" x-nullable: false required: [Name, Description, Settable, Source, Destination, Type, Options] properties: Name: type: "string" x-nullable: false example: "some-mount" Description: type: "string" x-nullable: false example: "This is a mount that's used by the plugin." Settable: type: "array" items: type: "string" Source: type: "string" example: "/var/lib/docker/plugins/" Destination: type: "string" x-nullable: false example: "/mnt/state" Type: type: "string" x-nullable: false example: "bind" Options: type: "array" items: type: "string" example: - "rbind" - "rw" PluginDevice: type: "object" required: [Name, Description, Settable, Path] x-nullable: false properties: Name: type: "string" x-nullable: false Description: type: "string" x-nullable: false Settable: type: "array" items: type: "string" Path: type: "string" example: "/dev/fuse" PluginEnv: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" Description: x-nullable: false type: "string" Settable: type: "array" items: type: "string" Value: type: "string" PluginInterfaceType: type: "object" x-nullable: false required: [Prefix, Capability, Version] properties: Prefix: type: "string" x-nullable: false Capability: type: "string" x-nullable: false Version: type: "string" x-nullable: false PluginPrivilege: description: | Describes a permission the user has to accept upon installing the plugin. type: "object" x-go-name: "PluginPrivilege" properties: Name: type: "string" example: "network" Description: type: "string" Value: type: "array" items: type: "string" example: - "host" Plugin: description: "A plugin for the Engine API" type: "object" required: [Settings, Enabled, Config, Name] properties: Id: type: "string" example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" Name: type: "string" x-nullable: false example: "tiborvass/sample-volume-plugin" Enabled: description: True if the plugin is running. False if the plugin is not running, only installed. type: "boolean" x-nullable: false example: true Settings: description: "Settings that can be modified by users." 
type: "object" x-nullable: false required: [Args, Devices, Env, Mounts] properties: Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: type: "string" example: - "DEBUG=0" Args: type: "array" items: type: "string" Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PluginReference: description: "plugin remote reference used to push/pull the plugin" type: "string" x-nullable: false example: "localhost:5000/tiborvass/sample-volume-plugin:latest" Config: description: "The config of a plugin." type: "object" x-nullable: false required: - Description - Documentation - Interface - Entrypoint - WorkDir - Network - Linux - PidHost - PropagatedMount - IpcHost - Mounts - Env - Args properties: DockerVersion: description: "Docker Version used to create the plugin" type: "string" x-nullable: false example: "17.06.0-ce" Description: type: "string" x-nullable: false example: "A sample volume plugin for Docker" Documentation: type: "string" x-nullable: false example: "https://docs.docker.com/engine/extend/plugins/" Interface: description: "The interface between Docker and the plugin" x-nullable: false type: "object" required: [Types, Socket] properties: Types: type: "array" items: $ref: "#/definitions/PluginInterfaceType" example: - "docker.volumedriver/1.0" Socket: type: "string" x-nullable: false example: "plugins.sock" ProtocolScheme: type: "string" example: "some.protocol/v1.0" description: "Protocol to use for clients connecting to the plugin." enum: - "" - "moby.plugins.http/v1" Entrypoint: type: "array" items: type: "string" example: - "/usr/bin/sample-volume-plugin" - "/data" WorkDir: type: "string" x-nullable: false example: "/bin/" User: type: "object" x-nullable: false properties: UID: type: "integer" format: "uint32" example: 1000 GID: type: "integer" format: "uint32" example: 1000 Network: type: "object" x-nullable: false required: [Type] properties: Type: x-nullable: false type: "string" example: "host" Linux: type: "object" x-nullable: false required: [Capabilities, AllowAllDevices, Devices] properties: Capabilities: type: "array" items: type: "string" example: - "CAP_SYS_ADMIN" - "CAP_SYSLOG" AllowAllDevices: type: "boolean" x-nullable: false example: false Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PropagatedMount: type: "string" x-nullable: false example: "/mnt/volumes" IpcHost: type: "boolean" x-nullable: false example: false PidHost: type: "boolean" x-nullable: false example: false Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: $ref: "#/definitions/PluginEnv" example: - Name: "DEBUG" Description: "If set, prints debug messages" Settable: null Value: "0" Args: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" example: "args" Description: x-nullable: false type: "string" example: "command line arguments" Settable: type: "array" items: type: "string" Value: type: "array" items: type: "string" rootfs: type: "object" properties: type: type: "string" example: "layers" diff_ids: type: "array" items: type: "string" example: - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ObjectVersion: description: | The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. 
The client must send the version number along with the modified specification when updating these objects. This approach ensures safe concurrency and determinism in that the change on the object may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not unintentionally overwrite each other. type: "object" properties: Index: type: "integer" format: "uint64" example: 373531 NodeSpec: type: "object" properties: Name: description: "Name for the node." type: "string" example: "my-node" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Role: description: "Role of the node." type: "string" enum: - "worker" - "manager" example: "manager" Availability: description: "Availability of the node." type: "string" enum: - "active" - "pause" - "drain" example: "active" example: Availability: "active" Name: "node-name" Role: "manager" Labels: foo: "bar" Node: type: "object" properties: ID: type: "string" example: "24ifsmvkjbyhk" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the node was added to the swarm in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the node was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/NodeSpec" Description: $ref: "#/definitions/NodeDescription" Status: $ref: "#/definitions/NodeStatus" ManagerStatus: $ref: "#/definitions/ManagerStatus" NodeDescription: description: | NodeDescription encapsulates the properties of the Node as reported by the agent. type: "object" properties: Hostname: type: "string" example: "bf3067039e47" Platform: $ref: "#/definitions/Platform" Resources: $ref: "#/definitions/ResourceObject" Engine: $ref: "#/definitions/EngineDescription" TLSInfo: $ref: "#/definitions/TLSInfo" Platform: description: | Platform represents the platform (Arch/OS). type: "object" properties: Architecture: description: | Architecture represents the hardware architecture (for example, `x86_64`). type: "string" example: "x86_64" OS: description: | OS represents the Operating System (for example, `linux` or `windows`). type: "string" example: "linux" EngineDescription: description: "EngineDescription provides information about an engine." 
type: "object" properties: EngineVersion: type: "string" example: "17.06.0" Labels: type: "object" additionalProperties: type: "string" example: foo: "bar" Plugins: type: "array" items: type: "object" properties: Type: type: "string" Name: type: "string" example: - Type: "Log" Name: "awslogs" - Type: "Log" Name: "fluentd" - Type: "Log" Name: "gcplogs" - Type: "Log" Name: "gelf" - Type: "Log" Name: "journald" - Type: "Log" Name: "json-file" - Type: "Log" Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" Name: "syslog" - Type: "Network" Name: "bridge" - Type: "Network" Name: "host" - Type: "Network" Name: "ipvlan" - Type: "Network" Name: "macvlan" - Type: "Network" Name: "null" - Type: "Network" Name: "overlay" - Type: "Volume" Name: "local" - Type: "Volume" Name: "localhost:5000/vieux/sshfs:latest" - Type: "Volume" Name: "vieux/sshfs:latest" TLSInfo: description: | Information about the issuer of leaf TLS certificates and the trusted root CA certificate. type: "object" properties: TrustRoot: description: | The root CA certificate(s) that are used to validate leaf TLS certificates. type: "string" CertIssuerSubject: description: The base64-url-safe-encoded raw subject bytes of the issuer. type: "string" CertIssuerPublicKey: description: | The base64-url-safe-encoded raw public key bytes of the issuer. type: "string" example: TrustRoot: | -----BEGIN CERTIFICATE----- MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H -----END CERTIFICATE----- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" NodeStatus: description: | NodeStatus represents the status of a node. It provides the current status of the node, as seen by the manager. type: "object" properties: State: $ref: "#/definitions/NodeState" Message: type: "string" example: "" Addr: description: "IP address of the node." type: "string" example: "172.17.0.2" NodeState: description: "NodeState represents the state of a node." type: "string" enum: - "unknown" - "down" - "ready" - "disconnected" example: "ready" ManagerStatus: description: | ManagerStatus represents the status of a manager. It provides the current status of a node's manager component, if the node is a manager. x-nullable: true type: "object" properties: Leader: type: "boolean" default: false example: true Reachability: $ref: "#/definitions/Reachability" Addr: description: | The IP address and port at which the manager is reachable. type: "string" example: "10.0.0.46:2377" Reachability: description: "Reachability represents the reachability of a node." type: "string" enum: - "unknown" - "unreachable" - "reachable" example: "reachable" SwarmSpec: description: "User modifiable swarm configuration." type: "object" properties: Name: description: "Name of the swarm." type: "string" example: "default" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: com.example.corp.type: "production" com.example.corp.department: "engineering" Orchestration: description: "Orchestration configuration." type: "object" x-nullable: true properties: TaskHistoryRetentionLimit: description: | The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks. type: "integer" format: "int64" example: 10 Raft: description: "Raft configuration." type: "object" properties: SnapshotInterval: description: "The number of log entries between snapshots." type: "integer" format: "uint64" example: 10000 KeepOldSnapshots: description: | The number of snapshots to keep beyond the current snapshot. type: "integer" format: "uint64" LogEntriesForSlowFollowers: description: | The number of log entries to keep around to sync up slow followers after a snapshot is created. type: "integer" format: "uint64" example: 500 ElectionTick: description: | The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 3 HeartbeatTick: description: | The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 1 Dispatcher: description: "Dispatcher configuration." type: "object" x-nullable: true properties: HeartbeatPeriod: description: | The delay for an agent to send a heartbeat to the dispatcher. type: "integer" format: "int64" example: 5000000000 CAConfig: description: "CA configuration." type: "object" x-nullable: true properties: NodeCertExpiry: description: "The duration node certificates are issued for." type: "integer" format: "int64" example: 7776000000000000 ExternalCAs: description: | Configuration for forwarding signing requests to an external certificate authority. type: "array" items: type: "object" properties: Protocol: description: | Protocol for communication with the external CA (currently only `cfssl` is supported). type: "string" enum: - "cfssl" default: "cfssl" URL: description: | URL where certificate signing requests should be sent. type: "string" Options: description: | An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver. type: "object" additionalProperties: type: "string" CACert: description: | The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided). type: "string" SigningCACert: description: | The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format. type: "string" SigningCAKey: description: | The desired signing CA key for all swarm node TLS leaf certificates, in PEM format. type: "string" ForceRotate: description: | An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey` format: "uint64" type: "integer" EncryptionConfig: description: "Parameters related to encryption-at-rest." type: "object" properties: AutoLockManagers: description: | If set, generate a key and use it to lock data stored on the managers. 
type: "boolean" example: false TaskDefaults: description: "Defaults for creating tasks in this cluster." type: "object" properties: LogDriver: description: | The log driver to use for tasks created in the orchestrator if unspecified by a service. Updating this value only affects new tasks. Existing tasks continue to use their previously configured log driver until recreated. type: "object" properties: Name: description: | The log driver to use as a default for new tasks. type: "string" example: "json-file" Options: description: | Driver-specific options for the selectd log driver, specified as key/value pairs. type: "object" additionalProperties: type: "string" example: "max-file": "10" "max-size": "100m" # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but # without `JoinTokens`. ClusterInfo: description: | ClusterInfo represents information about the swarm as is returned by the "/info" endpoint. Join-tokens are not included. x-nullable: true type: "object" properties: ID: description: "The ID of the swarm." type: "string" example: "abajmipo7b4xz5ip2nrla6b11" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the swarm was initialised in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the swarm was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/SwarmSpec" TLSInfo: $ref: "#/definitions/TLSInfo" RootRotationInProgress: description: | Whether there is currently a root CA rotation in progress for the swarm type: "boolean" example: false DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. If no port is set or is set to 0, the default port (4789) is used. type: "integer" format: "uint32" default: 4789 example: 4789 DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" format: "CIDR" example: ["10.10.0.0/16", "20.20.0.0/16"] SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. type: "integer" format: "uint32" maximum: 29 default: 24 example: 24 JoinTokens: description: | JoinTokens contains the tokens workers and managers need to join the swarm. type: "object" properties: Worker: description: | The token workers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" Manager: description: | The token managers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" Swarm: type: "object" allOf: - $ref: "#/definitions/ClusterInfo" - type: "object" properties: JoinTokens: $ref: "#/definitions/JoinTokens" TaskSpec: description: "User modifiable task configuration." type: "object" properties: PluginSpec: type: "object" description: | Plugin spec for the service. *(Experimental release only.)* <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. 
properties: Name: description: "The name or 'alias' to use for the plugin." type: "string" Remote: description: "The plugin image reference to use." type: "string" Disabled: description: "Disable the plugin once scheduled." type: "boolean" PluginPrivilege: type: "array" items: $ref: "#/definitions/PluginPrivilege" ContainerSpec: type: "object" description: | Container spec for the service. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. properties: Image: description: "The image name to use for the container" type: "string" Labels: description: "User-defined key/value data." type: "object" additionalProperties: type: "string" Command: description: "The command to be run in the image." type: "array" items: type: "string" Args: description: "Arguments to the command." type: "array" items: type: "string" Hostname: description: | The hostname to use for the container, as a valid [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. type: "string" Env: description: | A list of environment variables in the form `VAR=value`. type: "array" items: type: "string" Dir: description: "The working directory for commands to run in." type: "string" User: description: "The user inside the container." type: "string" Groups: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" Privileges: type: "object" description: "Security options for the container" properties: CredentialSpec: type: "object" description: "CredentialSpec for managed service account (Windows only)" properties: Config: type: "string" example: "0bt9dmxjvjiqermk6xrop3ekq" description: | Load credential spec from a Swarm Config with the given ID. The specified config must also be present in the Configs field with the Runtime property set. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. File: type: "string" example: "spec.json" description: | Load credential spec from this file. The file is read by the daemon, and must be present in the `CredentialSpecs` subdirectory in the docker data directory, which defaults to `C:\ProgramData\Docker\` on Windows. For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. Registry: type: "string" description: | Load credential spec from this value in the Windows registry. The specified registry value must be located in: `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" properties: Disable: type: "boolean" description: "Disable SELinux" User: type: "string" description: "SELinux user label" Role: type: "string" description: "SELinux role label" Type: type: "string" description: "SELinux type label" Level: type: "string" description: "SELinux level label" TTY: description: "Whether a pseudo-TTY should be allocated." 
type: "boolean" OpenStdin: description: "Open `stdin`" type: "boolean" ReadOnly: description: "Mount the container's root filesystem as read only." type: "boolean" Mounts: description: | Specification for mounts to be added to containers created as part of the service. type: "array" items: $ref: "#/definitions/Mount" StopSignal: description: "Signal to stop the container." type: "string" StopGracePeriod: description: | Amount of time to wait for the container to terminate before forcefully killing it. type: "integer" format: "int64" HealthCheck: $ref: "#/definitions/HealthConfig" Hosts: type: "array" description: | A list of hostname/IP mappings to add to the container's `hosts` file. The format of extra hosts is specified in the [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) man page: IP_address canonical_hostname [aliases...] items: type: "string" DNSConfig: description: | Specification for DNS related configurations in resolver configuration file (`resolv.conf`). type: "object" properties: Nameservers: description: "The IP addresses of the name servers." type: "array" items: type: "string" Search: description: "A search list for host-name lookup." type: "array" items: type: "string" Options: description: | A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.). type: "array" items: type: "string" Secrets: description: | Secrets contains references to zero or more secrets that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" SecretID: description: | SecretID represents the ID of the specific secret that we're referencing. type: "string" SecretName: description: | SecretName is the name of the secret that this references, but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID. type: "string" Configs: description: | Configs contains references to zero or more configs that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" Runtime: description: | Runtime represents a target that is not mounted into the container but is used by the task <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually > exclusive type: "object" ConfigID: description: | ConfigID represents the ID of the specific config that we're referencing. type: "string" ConfigName: description: | ConfigName is the name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID. 
type: "string" Isolation: type: "string" description: | Isolation technology of the containers running the service. (Windows only) enum: - "default" - "process" - "hyperv" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true Sysctls: description: | Set kernel namedspaced parameters (sysctls) in the container. The Sysctls option on services accepts the same sysctls as the are supported on containers. Note that while the same sysctls are supported, no guarantees or checks are made about their suitability for a clustered environment, and it's up to the user to determine whether a given sysctl will work properly in a Service. type: "object" additionalProperties: type: "string" # This option is not used by Windows containers CapabilityAdd: type: "array" description: | A list of kernel capabilities to add to the default set for the container. items: type: "string" example: - "CAP_NET_RAW" - "CAP_SYS_ADMIN" - "CAP_SYS_CHROOT" - "CAP_SYSLOG" CapabilityDrop: type: "array" description: | A list of kernel capabilities to drop from the default set for the container. items: type: "string" example: - "CAP_NET_RAW" Ulimits: description: | A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay networks. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. type: "object" properties: ContainerID: description: "ID of the container represented by this task" type: "string" Resources: description: | Resource requirements which apply to each individual container created as part of the service. type: "object" properties: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" Reservation: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: description: | Specification for the restart policy which applies to containers created as part of this service. type: "object" properties: Condition: description: "Condition for restart." type: "string" enum: - "none" - "on-failure" - "any" Delay: description: "Delay between restart attempts." type: "integer" format: "int64" MaxAttempts: description: | Maximum attempts to restart a given container before giving up (default value is 0, which is ignored). type: "integer" format: "int64" default: 0 Window: description: | Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded). type: "integer" format: "int64" default: 0 Placement: type: "object" properties: Constraints: description: | An array of constraint expressions to limit the set of nodes where a task can be scheduled. Constraint expressions can either use a _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find nodes that satisfy every expression (AND match). 
Constraints can match node or Docker Engine labels as follows: node attribute | matches | example ---------------------|--------------------------------|----------------------------------------------- `node.id` | Node ID | `node.id==2ivku8v2gvtg4` `node.hostname` | Node hostname | `node.hostname!=node-2` `node.role` | Node role (`manager`/`worker`) | `node.role==manager` `node.platform.os` | Node operating system | `node.platform.os==windows` `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` `node.labels` | User-defined node labels | `node.labels.security==high` `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` `engine.labels` apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add `node.labels` for operational purposes by using the [`node update endpoint`](#operation/NodeUpdate). type: "array" items: type: "string" example: - "node.hostname!=node3.corp.example.com" - "node.role!=manager" - "node.labels.type==production" - "node.platform.os==linux" - "node.platform.arch==x86_64" Preferences: description: | Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence. type: "array" items: type: "object" properties: Spread: type: "object" properties: SpreadDescriptor: description: | label descriptor, such as `engine.labels.az`. type: "string" example: - Spread: SpreadDescriptor: "node.labels.datacenter" - Spread: SpreadDescriptor: "node.labels.rack" MaxReplicas: description: | Maximum number of replicas for per node (default value is 0, which is unlimited) type: "integer" format: "int64" default: 0 Platforms: description: | Platforms stores all the platforms that the service's image can run on. This field is used in the platform filter for scheduling. If empty, then the platform filter is off, meaning there are no scheduling restrictions. type: "array" items: $ref: "#/definitions/Platform" ForceUpdate: description: | A counter that triggers an update even if no relevant parameters have been changed. type: "integer" Runtime: description: | Runtime is the type of runtime specified for the task executor. type: "string" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" LogDriver: description: | Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified. type: "object" properties: Name: type: "string" Options: type: "object" additionalProperties: type: "string" TaskState: type: "string" enum: - "new" - "allocated" - "pending" - "assigned" - "accepted" - "preparing" - "ready" - "starting" - "running" - "complete" - "shutdown" - "failed" - "rejected" - "remove" - "orphaned" Task: type: "object" properties: ID: description: "The ID of the task." type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Name: description: "Name of the task." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Spec: $ref: "#/definitions/TaskSpec" ServiceID: description: "The ID of the service this task is part of." type: "string" Slot: type: "integer" NodeID: description: "The ID of the node that this task is on." 
type: "string" AssignedGenericResources: $ref: "#/definitions/GenericResources" Status: type: "object" properties: Timestamp: type: "string" format: "dateTime" State: $ref: "#/definitions/TaskState" Message: type: "string" Err: type: "string" ContainerStatus: type: "object" properties: ContainerID: type: "string" PID: type: "integer" ExitCode: type: "integer" DesiredState: $ref: "#/definitions/TaskState" JobIteration: description: | If the Service this Task belongs to is a job-mode service, contains the JobIteration of the Service this Task was created for. Absent if the Task was created for a Replicated or Global Service. $ref: "#/definitions/ObjectVersion" example: ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" AssignedGenericResources: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" ServiceSpec: description: "User modifiable configuration for a service." properties: Name: description: "Name of the service." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" TaskTemplate: $ref: "#/definitions/TaskSpec" Mode: description: "Scheduling mode for the service." type: "object" properties: Replicated: type: "object" properties: Replicas: type: "integer" format: "int64" Global: type: "object" ReplicatedJob: description: | The mode used for services with a finite number of tasks that run to a completed state. type: "object" properties: MaxConcurrent: description: | The maximum number of replicas to run simultaneously. type: "integer" format: "int64" default: 1 TotalCompletions: description: | The total number of replicas desired to reach the Completed state. If unset, will default to the value of `MaxConcurrent` type: "integer" format: "int64" GlobalJob: description: | The mode used for services which run a task to the completed state on each valid node. type: "object" UpdateConfig: description: "Specification for the update strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: "Amount of time between updates, in nanoseconds." type: "integer" format: "int64" FailureAction: description: | Action to take if an updated task fails to run, or stops running during the update. 
type: "string" enum: - "continue" - "pause" - "rollback" Monitor: description: | Amount of time to monitor each updated task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" RollbackConfig: description: "Specification for the rollback strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: | Amount of time between rollback iterations, in nanoseconds. type: "integer" format: "int64" FailureAction: description: | Action to take if an rolled back task fails to run, or stops running during the rollback. type: "string" enum: - "continue" - "pause" Monitor: description: | Amount of time to monitor each rolled back task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" EndpointSpec: $ref: "#/definitions/EndpointSpec" EndpointPortConfig: type: "object" properties: Name: type: "string" Protocol: type: "string" enum: - "tcp" - "udp" - "sctp" TargetPort: description: "The port inside the container." type: "integer" PublishedPort: description: "The port on the swarm hosts." type: "integer" PublishMode: description: | The mode in which port is published. <p><br /></p> - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publish the port directly on the swarm node where that service is running. type: "string" enum: - "ingress" - "host" default: "ingress" example: "ingress" EndpointSpec: description: "Properties that can be configured to access and load balance a service." type: "object" properties: Mode: description: | The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" - "dnsrr" default: "vip" Ports: description: | List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used. 
type: "array" items: $ref: "#/definitions/EndpointPortConfig" Service: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ServiceSpec" Endpoint: type: "object" properties: Spec: $ref: "#/definitions/EndpointSpec" Ports: type: "array" items: $ref: "#/definitions/EndpointPortConfig" VirtualIPs: type: "array" items: type: "object" properties: NetworkID: type: "string" Addr: type: "string" UpdateStatus: description: "The status of a service update." type: "object" properties: State: type: "string" enum: - "updating" - "paused" - "completed" StartedAt: type: "string" format: "dateTime" CompletedAt: type: "string" format: "dateTime" Message: type: "string" ServiceStatus: description: | The status of the service's tasks. Provided only when requested as part of a ServiceList operation. type: "object" properties: RunningTasks: description: | The number of tasks for the service currently in the Running state. type: "integer" format: "uint64" example: 7 DesiredTasks: description: | The number of tasks for the service desired to be running. For replicated services, this is the replica count from the service spec. For global services, this is computed by taking count of all tasks for the service with a Desired State other than Shutdown. type: "integer" format: "uint64" example: 10 CompletedTasks: description: | The number of tasks for a job that are in the Completed state. This field must be cross-referenced with the service type, as the value of 0 may mean the service is not in a job mode, or it may mean the job-mode service has no tasks yet Completed. type: "integer" format: "uint64" JobStatus: description: | The status of the service when it is in one of ReplicatedJob or GlobalJob modes. Absent on Replicated and Global mode services. The JobIteration is an ObjectVersion, but unlike the Service's version, does not need to be sent with an update request. type: "object" properties: JobIteration: description: | JobIteration is a value increased each time a Job is executed, successfully or otherwise. "Executed", in this case, means the job as a whole has been started, not that an individual Task has been launched. A job is "Executed" when its ServiceSpec is updated. JobIteration can be used to disambiguate Tasks belonging to different executions of a job. Though JobIteration will increase with each subsequent execution, it may not necessarily increase by 1, and so JobIteration should not be used to $ref: "#/definitions/ObjectVersion" LastExecution: description: | The last time, as observed by the server, that this job was started. 
type: "string" format: "dateTime" example: ID: "9mnpnzenvg8p8tdbtq4wvbkcz" Version: Index: 19 CreatedAt: "2016-06-07T21:05:51.880065305Z" UpdatedAt: "2016-06-07T21:07:29.962229872Z" Spec: Name: "hopeful_cori" TaskTemplate: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Endpoint: Spec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 VirtualIPs: - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.2/16" - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.3/16" ImageDeleteResponseItem: type: "object" properties: Untagged: description: "The image ID of an image that was untagged" type: "string" Deleted: description: "The image ID of an image that was deleted" type: "string" ServiceUpdateResponse: type: "object" properties: Warnings: description: "Optional warning messages" type: "array" items: type: "string" example: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: type: "object" properties: Id: description: "The ID of this container" type: "string" x-go-name: "ID" Names: description: "The names that this container has been given" type: "array" items: type: "string" Image: description: "The name of the image used when creating this container" type: "string" ImageID: description: "The ID of the image that this container was created from" type: "string" Command: description: "Command to run when starting the container" type: "string" Created: description: "When the container was created" type: "integer" format: "int64" Ports: description: "The ports exposed by this container" type: "array" items: $ref: "#/definitions/Port" SizeRw: description: "The size of files that have been created or changed by this container" type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container" type: "integer" format: "int64" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" State: description: "The state of this container (e.g. `Exited`)" type: "string" Status: description: "Additional human-readable status of this container (e.g. `Exit 0`)" type: "string" HostConfig: type: "object" properties: NetworkMode: type: "string" NetworkSettings: description: "A summary of the container's network settings" type: "object" properties: Networks: type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Mounts: type: "array" items: $ref: "#/definitions/Mount" Driver: description: "Driver represents a driver (network, logging, secrets)." type: "object" required: [Name] properties: Name: description: "Name of the driver." type: "string" x-nullable: false example: "some-driver" Options: description: "Key/value map of driver-specific options." type: "object" x-nullable: false additionalProperties: type: "string" example: OptionA: "value for driver-specific option A" OptionB: "value for driver-specific option B" SecretSpec: type: "object" properties: Name: description: "User-defined name of the secret." 
type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) data to store as secret. This field is only used to _create_ a secret, and is not returned by other endpoints. type: "string" example: "" Driver: description: | Name of the secrets driver used to fetch the secret's value from an external secret store. $ref: "#/definitions/Driver" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Secret: type: "object" properties: ID: type: "string" example: "blt1owaxmitz71s9v5zh81zun" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" UpdatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" Spec: $ref: "#/definitions/SecretSpec" ConfigSpec: type: "object" properties: Name: description: "User-defined name of the config." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) config data. type: "string" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Config: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ConfigSpec" ContainerState: description: | ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" properties: Status: description: | String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead". type: "string" enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] example: "running" Running: description: | Whether this container is running. Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. Use the `Status` field instead to determine if a container's state is "running". type: "boolean" example: true Paused: description: "Whether this container is paused." type: "boolean" example: false Restarting: description: "Whether this container is restarting." type: "boolean" example: false OOMKilled: description: | Whether this container has been killed because it ran out of memory. type: "boolean" example: false Dead: type: "boolean" example: false Pid: description: "The process ID of this container" type: "integer" example: 1234 ExitCode: description: "The last exit code of this container" type: "integer" example: 0 Error: type: "string" StartedAt: description: "The time when this container was last started." 
type: "string" example: "2020-01-06T09:06:59.461876391Z" FinishedAt: description: "The time when this container last exited." type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: x-nullable: true $ref: "#/definitions/Health" SystemVersion: type: "object" description: | Response of Engine API: GET "/version" properties: Platform: type: "object" required: [Name] properties: Name: type: "string" Components: type: "array" description: | Information about system components items: type: "object" x-go-name: ComponentVersion required: [Name, Version] properties: Name: description: | Name of the component type: "string" example: "Engine" Version: description: | Version of the component type: "string" x-nullable: false example: "19.03.12" Details: description: | Key/value pairs of strings with additional information about the component. These values are intended for informational purposes only, and their content is not defined, and not part of the API specification. These messages can be printed by the client as information to the user. type: "object" x-nullable: true Version: description: "The version of the daemon" type: "string" example: "19.03.12" ApiVersion: description: | The default (and highest) API version that is supported by the daemon type: "string" example: "1.40" MinAPIVersion: description: | The minimum API version that is supported by the daemon type: "string" example: "1.12" GitCommit: description: | The Git commit of the source code that was used to build the daemon type: "string" example: "48a66213fe" GoVersion: description: | The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" example: "go1.13.14" Os: description: | The operating system that the daemon is running on ("linux" or "windows") type: "string" example: "linux" Arch: description: | The architecture that the daemon is running on type: "string" example: "amd64" KernelVersion: description: | The kernel version (`uname -r`) that the daemon is running on. This field is omitted when empty. type: "string" example: "4.19.76-linuxkit" Experimental: description: | Indicates if the daemon is started with experimental features enabled. This field is omitted when empty / false. type: "boolean" example: true BuildTime: description: | The date and time that the daemon was compiled. type: "string" example: "2020-06-22T15:49:27.000000000+00:00" SystemInfo: type: "object" properties: ID: description: | Unique identifier of the daemon. <p><br /></p> > **Note**: The format of the ID itself is not part of the API, and > should not be considered stable. type: "string" example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" Containers: description: "Total number of containers on the host." type: "integer" example: 14 ContainersRunning: description: | Number of containers with status `"running"`. type: "integer" example: 3 ContainersPaused: description: | Number of containers with status `"paused"`. type: "integer" example: 1 ContainersStopped: description: | Number of containers with status `"stopped"`. type: "integer" example: 10 Images: description: | Total number of images on the host. Both _tagged_ and _untagged_ (dangling) images are counted. type: "integer" example: 508 Driver: description: "Name of the storage driver in use." type: "string" example: "overlay2" DriverStatus: description: | Information specific to the storage driver, provided as "label" / "value" pairs. 
This information is provided by the storage driver, and formatted in a way consistent with the output of `docker info` on the command line. <p><br /></p> > **Note**: The information returned in this field, including the > formatting of values and labels, should not be considered stable, > and may change without notice. type: "array" items: type: "array" items: type: "string" example: - ["Backing Filesystem", "extfs"] - ["Supports d_type", "true"] - ["Native Overlay Diff", "true"] DockerRootDir: description: | Root directory of persistent Docker state. Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` on Windows. type: "string" example: "/var/lib/docker" Plugins: $ref: "#/definitions/PluginsInfo" MemoryLimit: description: "Indicates if the host has memory limit support enabled." type: "boolean" example: true SwapLimit: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true KernelMemory: description: | Indicates if the host has kernel memory limit support enabled. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "boolean" example: true CpuCfsPeriod: description: | Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host. type: "boolean" example: true CpuCfsQuota: description: | Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host. type: "boolean" example: true CPUShares: description: | Indicates if CPU Shares limiting is supported by the host. type: "boolean" example: true CPUSet: description: | Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) type: "boolean" example: true PidsLimit: description: "Indicates if the host kernel has PID limit support enabled." type: "boolean" example: true OomKillDisable: description: "Indicates if OOM killer disable is supported on the host." type: "boolean" IPv4Forwarding: description: "Indicates IPv4 forwarding is enabled." type: "boolean" example: true BridgeNfIptables: description: "Indicates if `bridge-nf-call-iptables` is available on the host." type: "boolean" example: true BridgeNfIp6tables: description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." type: "boolean" example: true Debug: description: | Indicates if the daemon is running in debug-mode / with debug-level logging enabled. type: "boolean" example: true NFd: description: | The total number of file Descriptors in use by the daemon process. This information is only returned if debug-mode is enabled. type: "integer" example: 64 NGoroutines: description: | The number of goroutines that currently exist. This information is only returned if debug-mode is enabled. type: "integer" example: 174 SystemTime: description: | Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" example: "2017-08-08T20:28:29.06202363Z" LoggingDriver: description: | The logging driver to use as a default for new containers. type: "string" CgroupDriver: description: | The driver to use for managing cgroups. type: "string" enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" CgroupVersion: description: | The version of the cgroup. type: "string" enum: ["1", "2"] default: "1" example: "1" NEventsListener: description: "Number of event listeners subscribed." 
type: "integer" example: 30 KernelVersion: description: | Kernel version of the host. On Linux, this information obtained from `uname`. On Windows this information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. type: "string" example: "4.9.38-moby" OperatingSystem: description: | Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" or "Windows Server 2016 Datacenter" type: "string" example: "Alpine Linux v3.5" OSVersion: description: | Version of the host's operating system <p><br /></p> > **Note**: The information returned in this field, including its > very existence, and the formatting of values, should not be considered > stable, and may change without notice. type: "string" example: "16.04" OSType: description: | Generic type of the operating system of the host, as returned by the Go runtime (`GOOS`). Currently returned values are "linux" and "windows". A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "linux" Architecture: description: | Hardware architecture of the host, as returned by the Go runtime (`GOARCH`). A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "x86_64" NCPU: description: | The number of logical CPUs usable by the daemon. The number of available CPUs is checked by querying the operating system when the daemon starts. Changes to operating system CPU allocation after the daemon is started are not reflected. type: "integer" example: 4 MemTotal: description: | Total amount of physical memory available on the host, in bytes. type: "integer" format: "int64" example: 2095882240 IndexServerAddress: description: | Address / URL of the index server that is used for image search, and as a default for user authentication for Docker Hub and Docker Cloud. default: "https://index.docker.io/v1/" type: "string" example: "https://index.docker.io/v1/" RegistryConfig: $ref: "#/definitions/RegistryServiceConfig" GenericResources: $ref: "#/definitions/GenericResources" HttpProxy: description: | HTTP-proxy configured for the daemon. This value is obtained from the [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "http://xxxxx:[email protected]:8080" HttpsProxy: description: | HTTPS-proxy configured for the daemon. This value is obtained from the [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "https://xxxxx:[email protected]:4443" NoProxy: description: | Comma-separated list of domain extensions for which no proxy should be used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Containers do not automatically inherit this configuration. 
type: "string" example: "*.local, 169.254/16" Name: description: "Hostname of the host." type: "string" example: "node5.corp.example.com" Labels: description: | User-defined labels (key/value metadata) as set on the daemon. <p><br /></p> > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, > set through the daemon configuration, and _node_ labels, set from a > manager node in the Swarm. Node labels are not included in this > field. Node labels can be retrieved using the `/nodes/(id)` endpoint > on a manager node in the Swarm. type: "array" items: type: "string" example: ["storage=ssd", "production"] ExperimentalBuild: description: | Indicates if experimental features are enabled on the daemon. type: "boolean" example: true ServerVersion: description: | Version string of the daemon. > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) > returns the Swarm version instead of the daemon version, for example > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: description: | URL of the distributed storage backend. The storage backend is used for multihost networking (to store network and endpoint information) and by the node discovery mechanism. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "consul://consul.corp.example.com:8600/some/path" ClusterAdvertise: description: | The network endpoint that the Engine advertises for the purpose of node discovery. ClusterAdvertise is a `host:port` combination on which the daemon is reachable by other hosts. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "node5.corp.example.com:8000" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) runtimes configured on the daemon. Keys hold the "name" used to reference the runtime. The Docker daemon relies on an OCI compliant runtime (invoked via the `containerd` daemon) as its interface to the Linux kernel namespaces, cgroups, and SELinux. The default runtime is `runc`, and automatically configured. Additional runtimes can be configured by the user and will be listed here. type: "object" additionalProperties: $ref: "#/definitions/Runtime" default: runc: path: "runc" example: runc: path: "runc" runc-master: path: "/go/bin/runc" custom: path: "/usr/local/bin/my-oci-runtime" runtimeArgs: ["--debug", "--systemd-cgroup=false"] DefaultRuntime: description: | Name of the default OCI runtime that is used when starting containers. The default can be overridden per-container at create time. type: "string" default: "runc" example: "runc" Swarm: $ref: "#/definitions/SwarmInfo" LiveRestoreEnabled: description: | Indicates if live restore is enabled. If enabled, containers are kept running when the daemon is shutdown or upon daemon start if running containers are detected. type: "boolean" default: false example: false Isolation: description: | Represents the isolation technology to use as a default for containers. The supported values are platform-specific. 
If no isolation value is specified on daemon start, on Windows client, the default is `hyperv`, and on Windows server, the default is `process`. This option is currently not used on other platforms. default: "default" type: "string" enum: - "default" - "hyperv" - "process" InitBinary: description: | Name and, optional, path of the `docker-init` binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "docker-init" ContainerdCommit: $ref: "#/definitions/Commit" RuncCommit: $ref: "#/definitions/Commit" InitCommit: $ref: "#/definitions/Commit" SecurityOptions: description: | List of security features that are enabled on the daemon, such as apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value pairs. type: "array" items: type: "string" example: - "name=apparmor" - "name=seccomp,profile=default" - "name=selinux" - "name=userns" - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. If a commercial license has been applied to the daemon, information such as number of nodes, and expiration are included. type: "string" example: "Community Engine" DefaultAddressPools: description: | List of custom default address pools for local networks, which can be specified in the daemon.json file or dockerd option. Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 10.10.[0-255].0/24 address pools. type: "array" items: type: "object" properties: Base: description: "The network address in CIDR format" type: "string" example: "10.10.0.0/16" Size: description: "The network pool size" type: "integer" example: "24" Warnings: description: | List of warnings / informational messages about missing features, or issues related to the daemon configuration. These messages can be printed by the client as information to the user. type: "array" items: type: "string" example: - "WARNING: No memory limit support" - "WARNING: bridge-nf-call-iptables is disabled" - "WARNING: bridge-nf-call-ip6tables is disabled" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: description: | Available plugins per type. <p><br /></p> > **Note**: Only unmanaged (V1) plugins are included in this list. > V1 plugins are "lazily" loaded, and are not returned in this list > if there is no resource using the plugin. type: "object" properties: Volume: description: "Names of available volume-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["local"] Network: description: "Names of available network-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] Authorization: description: "Names of available authorization plugins." type: "array" items: type: "string" example: ["img-authz-plugin", "hbm"] Log: description: "Names of available logging-drivers, and logging-driver plugins." type: "array" items: type: "string" example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] RegistryServiceConfig: description: | RegistryServiceConfig stores daemon registry services configuration. 
type: "object" x-nullable: true properties: AllowNondistributableArtifactsCIDRs: description: | List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior, and enables the daemon to push nondistributable artifacts to all registries whose resolved IP address is within the subnet described by the CIDR syntax. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior for the specified registries. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. By default, local registries (`127.0.0.0/8`) are configured as insecure. All other registries are secure. Communicating with an insecure registry is not possible if the daemon assumes that registry is secure. This configuration override this behavior, insecure communication with registries whose resolved IP address is within the subnet described by the CIDR syntax. Registries can also be marked insecure by hostname. Those registries are listed under `IndexConfigs` and have their `Secure` field set to `false`. > **Warning**: Using this option can be useful when running a local > registry, but introduces security vulnerabilities. This option > should therefore ONLY be used for testing purposes. For increased > security, users should add their CA to their system's list of trusted > CAs instead of enabling this option. 
type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] IndexConfigs: type: "object" additionalProperties: $ref: "#/definitions/IndexInfo" example: "127.0.0.1:5000": "Name": "127.0.0.1:5000" "Mirrors": [] "Secure": false "Official": false "[2001:db8:a0b:12f0::1]:80": "Name": "[2001:db8:a0b:12f0::1]:80" "Mirrors": [] "Secure": false "Official": false "docker.io": Name: "docker.io" Mirrors: ["https://hub-mirror.corp.example.com:5000/"] Secure: true Official: true "registry.internal.corp.example.com:3000": Name: "registry.internal.corp.example.com:3000" Mirrors: [] Secure: false Official: false Mirrors: description: | List of registry URLs that act as a mirror for the official (`docker.io`) registry. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://[2001:db8:a0b:12f0::1]/" IndexInfo: description: IndexInfo contains information about a registry. type: "object" x-nullable: true properties: Name: description: | Name of the registry, such as "docker.io". type: "string" example: "docker.io" Mirrors: description: | List of mirrors, expressed as URIs. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://registry-2.docker.io/" - "https://registry-3.docker.io/" Secure: description: | Indicates if the registry is part of the list of insecure registries. If `false`, the registry is insecure. Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. > **Warning**: Insecure registries can be useful when running a local > registry. However, because its use creates security vulnerabilities > it should ONLY be enabled for testing purposes. For increased > security, users should add their CA to their system's list of > trusted CAs instead of enabling this option. type: "boolean" example: true Official: description: | Indicates whether this is an official registry (i.e., Docker Hub / docker.io) type: "boolean" example: true Runtime: description: | Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) runtime. The runtime is invoked by the daemon via the `containerd` daemon. OCI runtimes act as an interface to the Linux kernel namespaces, cgroups, and SELinux. type: "object" properties: path: description: | Name and, optional, path, of the OCI executable binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "/usr/local/bin/my-oci-runtime" runtimeArgs: description: | List of command-line arguments to pass to the runtime when invoked. type: "array" x-nullable: true items: type: "string" example: ["--debug", "--systemd-cgroup=false"] Commit: description: | Commit holds the Git-commit (SHA1) that a binary was built from, as reported in the version-string of external tools, such as `containerd`, or `runC`. type: "object" properties: ID: description: "Actual commit ID of external tool." type: "string" example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" Expected: description: | Commit ID of external tool expected by dockerd as set at build time. type: "string" example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" SwarmInfo: description: | Represents generic information about swarm. type: "object" properties: NodeID: description: "Unique identifier of for this node in the swarm." 
type: "string" default: "" example: "k67qz4598weg5unwwffg6z1m1" NodeAddr: description: | IP address at which this node can be reached by other nodes in the swarm. type: "string" default: "" example: "10.0.0.46" LocalNodeState: $ref: "#/definitions/LocalNodeState" ControlAvailable: type: "boolean" default: false example: true Error: type: "string" default: "" RemoteManagers: description: | List of ID's and addresses of other managers in the swarm. type: "array" default: null x-nullable: true items: $ref: "#/definitions/PeerNode" example: - NodeID: "71izy0goik036k48jg985xnds" Addr: "10.0.0.158:2377" - NodeID: "79y6h1o4gv8n120drcprv5nmc" Addr: "10.0.0.159:2377" - NodeID: "k67qz4598weg5unwwffg6z1m1" Addr: "10.0.0.46:2377" Nodes: description: "Total number of nodes in the swarm." type: "integer" x-nullable: true example: 4 Managers: description: "Total number of managers in the swarm." type: "integer" x-nullable: true example: 3 Cluster: $ref: "#/definitions/ClusterInfo" LocalNodeState: description: "Current local status of this node." type: "string" default: "" enum: - "" - "inactive" - "pending" - "active" - "error" - "locked" example: "active" PeerNode: description: "Represents a peer-node in the swarm" properties: NodeID: description: "Unique identifier of for this node in the swarm." type: "string" Addr: description: | IP address and ports at which this node can be reached. type: "string" NetworkAttachmentConfig: description: | Specifies how a service should be attached to a particular network. type: "object" properties: Target: description: | The target network for attachment. Must be a network name or ID. type: "string" Aliases: description: | Discoverable alternate names for the service on this network. type: "array" items: type: "string" DriverOpts: description: | Driver attachment options for the network target. type: "object" additionalProperties: type: "string" EventActor: description: | Actor describes something that generates events, like a container, network, or a volume. type: "object" properties: ID: description: "The ID of the object emitting the event" type: "string" example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" Attributes: description: | Various key/value attributes of the object, depending on its type. type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-label-value" image: "alpine:latest" name: "my-container" EventMessage: description: | EventMessage represents the information an event contains. type: "object" title: "SystemEventsResponse" properties: Type: description: "The type of object emitting the event" type: "string" enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] example: "container" Action: description: "The type of event" type: "string" example: "create" Actor: $ref: "#/definitions/EventActor" scope: description: | Scope of the event. Engine events are `local` scope. Cluster (Swarm) events are `swarm` scope. type: "string" enum: ["local", "swarm"] time: description: "Timestamp of event" type: "integer" format: "int64" example: 1629574695 timeNano: description: "Timestamp of event, with nanosecond accuracy" type: "integer" format: "int64" example: 1629574695515050031 OCIDescriptor: type: "object" x-go-name: Descriptor description: | A descriptor struct containing digest, media type, and size, as defined in the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). 
properties: mediaType: description: | The media type of the object this schema refers to. type: "string" example: "application/vnd.docker.distribution.manifest.v2+json" digest: description: | The digest of the targeted content. type: "string" example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" size: description: | The size in bytes of the blob. type: "integer" format: "int64" example: 3987495 # TODO Not yet including these fields for now, as they are nil / omitted in our response. # urls: # description: | # List of URLs from which this object MAY be downloaded. # type: "array" # items: # type: "string" # format: "uri" # annotations: # description: | # Arbitrary metadata relating to the targeted content. # type: "object" # additionalProperties: # type: "string" # platform: # $ref: "#/definitions/OCIPlatform" OCIPlatform: type: "object" x-go-name: Platform description: | Describes the platform which the image in the manifest runs on, as defined in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). properties: architecture: description: | The CPU architecture, for example `amd64` or `ppc64`. type: "string" example: "arm" os: description: | The operating system, for example `linux` or `windows`. type: "string" example: "windows" os.version: description: | Optional field specifying the operating system version, for example on Windows `10.0.19041.1165`. type: "string" example: "10.0.19041.1165" os.features: description: | Optional field specifying an array of strings, each listing a required OS feature (for example on Windows `win32k`). type: "array" items: type: "string" example: - "win32k" variant: description: | Optional field specifying a variant of the CPU, for example `v7` to specify ARMv7 when architecture is `arm`. type: "string" example: "v7" DistributionInspect: type: "object" x-go-name: DistributionInspect title: "DistributionInspectResponse" required: [Descriptor, Platforms] description: | Describes the result obtained from contacting the registry to retrieve image metadata. properties: Descriptor: $ref: "#/definitions/OCIDescriptor" Platforms: type: "array" description: | An array containing all platforms supported by the image. items: $ref: "#/definitions/OCIPlatform" paths: /containers/json: get: summary: "List containers" description: | Returns a list of containers. For details on the format, see the [inspect endpoint](#operation/ContainerInspect). Note that it uses a different, smaller representation of a container than inspecting a single container. For example, the list of linked containers is not propagated . operationId: "ContainerList" produces: - "application/json" parameters: - name: "all" in: "query" description: | Return all containers. By default, only running containers are shown. type: "boolean" default: false - name: "limit" in: "query" description: | Return this number of most recently created containers, including non-running ones. type: "integer" - name: "size" in: "query" description: | Return the size of container as fields `SizeRw` and `SizeRootFs`. type: "boolean" default: false - name: "filters" in: "query" description: | Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
Available filters: - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) - `before`=(`<container id>` or `<container name>`) - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `exited=<int>` containers with exit code of `<int>` - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - `id=<ID>` a container's ID - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - `is-task=`(`true`|`false`) - `label=key` or `label="key=value"` of a container label - `name=<name>` a container's name - `network`=(`<network id>` or `<network name>`) - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `since`=(`<container id>` or `<container name>`) - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - `volume`=(`<volume name>` or `<mount point destination>`) type: "string" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/ContainerSummary" examples: application/json: - Id: "8dfafdbc3a40" Names: - "/boring_feynman" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 1" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: - PrivatePort: 2222 PublicPort: 3333 Type: "tcp" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:02" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" - Id: "9cd87474be90" Names: - "/coolName" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 222222" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" Gateway: "172.17.0.1" IPAddress: "172.17.0.8" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:08" Mounts: [] - Id: "3176a2479c92" Names: - "/sleepy_dog" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 3333333333333333" Created: 1367854154 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" Gateway: "172.17.0.1" IPAddress: "172.17.0.6" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:06" Mounts: [] - Id: "4cb07b47f9fb" Names: - "/running_cat" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 444444444444444444444444444444444" Created: 1367854152 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" 
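  # Illustrative, non-normative sketch: the "filters" query parameter described above is
  # URL-encoded JSON, so listing only paused containers might look like:
  #
  #   curl --unix-socket /var/run/docker.sock \
  #     "http://localhost/containers/json?filters=%7B%22status%22%3A%5B%22paused%22%5D%7D"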
NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" Gateway: "172.17.0.1" IPAddress: "172.17.0.5" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:05" Mounts: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/create: post: summary: "Create a container" operationId: "ContainerCreate" consumes: - "application/json" - "application/octet-stream" produces: - "application/json" parameters: - name: "name" in: "query" description: | Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. type: "string" pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" schema: allOf: - $ref: "#/definitions/ContainerConfig" - type: "object" properties: HostConfig: $ref: "#/definitions/HostConfig" NetworkingConfig: $ref: "#/definitions/NetworkingConfig" example: Hostname: "" Domainname: "" User: "" AttachStdin: false AttachStdout: true AttachStderr: true Tty: false OpenStdin: false StdinOnce: false Env: - "FOO=bar" - "BAZ=quux" Cmd: - "date" Entrypoint: "" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" Volumes: /volumes/data: {} WorkingDir: "" NetworkDisabled: false MacAddress: "12:34:56:78:9a:bc" ExposedPorts: 22/tcp: {} StopSignal: "SIGTERM" StopTimeout: 10 HostConfig: Binds: - "/tmp:/tmp" Links: - "redis3:redis" Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpuQuota: 50000 CpusetCpus: "0,1" CpusetMems: "0,1" MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 300 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceWriteIOps: - {} DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" MemorySwappiness: 60 OomKillDisable: false OomScoreAdj: 500 PidMode: "" PidsLimit: 0 PortBindings: 22/tcp: - HostPort: "11022" PublishAllPorts: false Privileged: false ReadonlyRootfs: false Dns: - "8.8.8.8" DnsOptions: - "" DnsSearch: - "" VolumesFrom: - "parent" - "other:ro" CapAdd: - "NET_ADMIN" CapDrop: - "MKNOD" GroupAdd: - "newgroup" RestartPolicy: Name: "" MaximumRetryCount: 0 AutoRemove: true NetworkMode: "bridge" Devices: [] Ulimits: - {} LogConfig: Type: "json-file" Config: {} SecurityOpt: [] StorageOpt: {} CgroupParent: "" VolumeDriver: "" ShmSize: 67108864 NetworkingConfig: EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" required: true responses: 201: description: "Container created successfully" schema: type: "object" title: "ContainerCreateResponse" description: "OK response to ContainerCreate operation" required: [Id, Warnings] properties: Id: description: "The ID of the created container" type: "string" x-nullable: false Warnings: description: "Warnings encountered when creating the container" type: "array" x-nullable: false items: type: "string" 
examples: application/json: Id: "e90e34656806" Warnings: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/{id}/json: get: summary: "Inspect a container" description: "Return low-level information about a container." operationId: "ContainerInspect" produces: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "ContainerInspectResponse" properties: Id: description: "The ID of the container" type: "string" Created: description: "The time the container was created" type: "string" Path: description: "The path to the command being run" type: "string" Args: description: "The arguments to the command being run" type: "array" items: type: "string" State: x-nullable: true $ref: "#/definitions/ContainerState" Image: description: "The container's image ID" type: "string" ResolvConfPath: type: "string" HostnamePath: type: "string" HostsPath: type: "string" LogPath: type: "string" Name: type: "string" RestartCount: type: "integer" Driver: type: "string" Platform: type: "string" MountLabel: type: "string" ProcessLabel: type: "string" AppArmorProfile: type: "string" ExecIDs: description: "IDs of exec instances that are running in the container." type: "array" items: type: "string" x-nullable: true HostConfig: $ref: "#/definitions/HostConfig" GraphDriver: $ref: "#/definitions/GraphDriverData" SizeRw: description: | The size of files that have been created or changed by this container. type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container." 
type: "integer" format: "int64" Mounts: type: "array" items: $ref: "#/definitions/MountPoint" Config: $ref: "#/definitions/ContainerConfig" NetworkSettings: $ref: "#/definitions/NetworkSettings" examples: application/json: AppArmorProfile: "" Args: - "-c" - "exit 9" Config: AttachStderr: true AttachStdin: false AttachStdout: true Cmd: - "/bin/sh" - "-c" - "exit 9" Domainname: "" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Healthcheck: Test: ["CMD-SHELL", "exit 0"] Hostname: "ba033ac44011" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" MacAddress: "" NetworkDisabled: false OpenStdin: false StdinOnce: false Tty: false User: "" Volumes: /volumes/data: {} WorkingDir: "" StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" Driver: "devicemapper" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 0 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteIOps: - {} ContainerIDFile: "" CpusetCpus: "" CpusetMems: "" CpuPercent: 80 CpuShares: 0 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 Devices: [] DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" IpcMode: "" LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" PidMode: "" PortBindings: {} Privileged: false ReadonlyRootfs: false PublishAllPorts: false RestartPolicy: MaximumRetryCount: 2 Name: "on-failure" LogConfig: Type: "json-file" Sysctls: net.ipv4.ip_forward: "1" Ulimits: - {} VolumeDriver: "" ShmSize: 67108864 HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" MountLabel: "" Name: "/boring_euclid" NetworkSettings: Bridge: "" SandboxID: "" HairpinMode: false LinkLocalIPv6Address: "" LinkLocalIPv6PrefixLen: 0 SandboxKey: "" EndpointID: "" Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 IPAddress: "" IPPrefixLen: 0 IPv6Gateway: "" MacAddress: "" Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Path: "/bin/sh" ProcessLabel: "" ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" RestartCount: 1 State: Error: "" ExitCode: 9 FinishedAt: "2015-01-06T15:47:32.080254511Z" Health: Status: "healthy" FailingStreak: 0 Log: - Start: "2019-12-22T10:59:05.6385933Z" End: "2019-12-22T10:59:05.8078452Z" ExitCode: 0 Output: "" OOMKilled: 
false Dead: false Paused: false Pid: 0 Restarting: false Running: true StartedAt: "2015-01-06T15:47:32.072697474Z" Status: "running" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "size" in: "query" type: "boolean" default: false description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" tags: ["Container"] /containers/{id}/top: get: summary: "List processes running inside a container" description: | On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows. operationId: "ContainerTop" responses: 200: description: "no error" schema: type: "object" title: "ContainerTopResponse" description: "OK response to ContainerTop operation" properties: Titles: description: "The ps column titles" type: "array" items: type: "string" Processes: description: | Each process running in the container, where each is process is an array of values corresponding to the titles. type: "array" items: type: "array" items: type: "string" examples: application/json: Titles: - "UID" - "PID" - "PPID" - "C" - "STIME" - "TTY" - "TIME" - "CMD" Processes: - - "root" - "13642" - "882" - "0" - "17:03" - "pts/0" - "00:00:00" - "/bin/bash" - - "root" - "13735" - "13642" - "0" - "17:06" - "pts/0" - "00:00:00" - "sleep 10" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "ps_args" in: "query" description: "The arguments to pass to `ps`. For example, `aux`" type: "string" default: "-ef" tags: ["Container"] /containers/{id}/logs: get: summary: "Get container logs" description: | Get `stdout` and `stderr` logs from a container. Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. operationId: "ContainerLogs" responses: 200: description: | logs returned as a stream in response body. For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not set Content-Type. schema: type: "string" format: "binary" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "until" in: "query" description: "Only return logs before this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Container"] /containers/{id}/changes: get: summary: "Get changes on a container’s filesystem" description: | Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: - `0`: Modified - `1`: Added - `2`: Deleted operationId: "ContainerChanges" produces: ["application/json"] responses: 200: description: "The list of changes" schema: type: "array" items: type: "object" x-go-name: "ContainerChangeResponseItem" title: "ContainerChangeResponseItem" description: "change item in response to ContainerChanges operation" required: [Path, Kind] properties: Path: description: "Path to file that has changed" type: "string" x-nullable: false Kind: description: "Kind of change" type: "integer" format: "uint8" enum: [0, 1, 2] x-nullable: false examples: application/json: - Path: "/dev" Kind: 0 - Path: "/dev/kmsg" Kind: 1 - Path: "/test" Kind: 1 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/export: get: summary: "Export a container" description: "Export the contents of a container as a tarball." operationId: "ContainerExport" produces: - "application/octet-stream" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/stats: get: summary: "Get container stats based on resource usage" description: | This endpoint returns a live stream of a container’s resource usage statistics. The `precpu_stats` is the CPU statistic of the *previous* read, and is used to calculate the CPU usage percentage. It is not an exact copy of the `cpu_stats` field. If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. On a cgroup v2 host, the following fields are not set * `blkio_stats`: all fields other than `io_service_bytes_recursive` * `cpu_stats`: `cpu_usage.percpu_usage` * `memory_stats`: `max_usage` and `failcnt` Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: * used_memory = `memory_stats.usage - memory_stats.stats.cache` * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] responses: 200: description: "no error" schema: type: "object" examples: application/json: read: "2015-01-08T22:57:31.547920715Z" pids_stats: current: 3 networks: eth0: rx_bytes: 5338 rx_dropped: 0 rx_errors: 0 rx_packets: 36 tx_bytes: 648 tx_dropped: 0 tx_errors: 0 tx_packets: 8 eth5: rx_bytes: 4641 rx_dropped: 0 rx_errors: 0 rx_packets: 26 tx_bytes: 690 tx_dropped: 0 tx_errors: 0 tx_packets: 9 memory_stats: stats: total_pgmajfault: 0 cache: 0 mapped_file: 0 total_inactive_file: 0 pgpgout: 414 rss: 6537216 total_mapped_file: 0 writeback: 0 unevictable: 0 pgpgin: 477 total_unevictable: 0 pgmajfault: 0 total_rss: 6537216 total_rss_huge: 6291456 total_writeback: 0 total_inactive_anon: 0 rss_huge: 6291456 hierarchical_memory_limit: 67108864 total_pgfault: 964 total_active_file: 0 active_anon: 6537216 total_active_anon: 6537216 total_pgpgout: 414 total_cache: 0 inactive_anon: 0 active_file: 0 pgfault: 964 inactive_file: 0 total_pgpgin: 477 max_usage: 6651904 usage: 6537216 failcnt: 0 limit: 67108864 blkio_stats: {} cpu_stats: cpu_usage: percpu_usage: - 8646879 - 24472255 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100215355 usage_in_kernelmode: 30000000 system_cpu_usage: 739306590000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 precpu_stats: cpu_usage: percpu_usage: - 8646879 - 24350896 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100093996 usage_in_kernelmode: 30000000 system_cpu_usage: 9492140000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "stream" in: "query" description: | Stream the output. If false, the stats will be output once and then it will disconnect. type: "boolean" default: true - name: "one-shot" in: "query" description: | Only get a single stat instead of waiting for 2 cycles. Must be used with `stream=false`. type: "boolean" default: false tags: ["Container"] /containers/{id}/resize: post: summary: "Resize a container TTY" description: "Resize the TTY for a container." 
operationId: "ContainerResize" consumes: - "application/octet-stream" produces: - "text/plain" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "cannot resize container" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] /containers/{id}/start: post: summary: "Start a container" operationId: "ContainerStart" responses: 204: description: "no error" 304: description: "container already started" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" tags: ["Container"] /containers/{id}/stop: post: summary: "Stop a container" operationId: "ContainerStop" responses: 204: description: "no error" 304: description: "container already stopped" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/restart: post: summary: "Restart a container" operationId: "ContainerRestart" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/kill: post: summary: "Kill a container" description: | Send a POSIX signal to a container, defaulting to killing to the container. 
operationId: "ContainerKill" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is not running" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "signal" in: "query" description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" type: "string" default: "SIGKILL" tags: ["Container"] /containers/{id}/update: post: summary: "Update a container" description: | Change various configuration options of a container without having to recreate it. operationId: "ContainerUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "The container has been updated." schema: type: "object" title: "ContainerUpdateResponse" description: "OK response to ContainerUpdate operation" properties: Warnings: type: "array" items: type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "update" in: "body" required: true schema: allOf: - $ref: "#/definitions/Resources" - type: "object" properties: RestartPolicy: $ref: "#/definitions/RestartPolicy" example: BlkioWeight: 300 CpuShares: 512 CpuPeriod: 100000 CpuQuota: 50000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpusetCpus: "0,1" CpusetMems: "0" Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" tags: ["Container"] /containers/{id}/rename: post: summary: "Rename a container" operationId: "ContainerRename" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "name already in use" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "name" in: "query" required: true description: "New name for the container" type: "string" tags: ["Container"] /containers/{id}/pause: post: summary: "Pause a container" description: | Use the freezer cgroup to suspend all processes in a container. Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
operationId: "ContainerPause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/unpause: post: summary: "Unpause a container" description: "Resume a container which has been paused." operationId: "ContainerUnpause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/attach: post: summary: "Attach to a container" description: | Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. ### Hijacking This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. This is the response from the daemon for an attach request: ``` HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream [STREAM] ``` After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. For example, the client sends this request to upgrade the connection: ``` POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 Upgrade: tcp Connection: Upgrade ``` The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp [STREAM] ``` ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: ```go header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} ``` `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. The simplest way to implement this protocol is the following: 1. Read 8 bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. 
### Stream format when using a TTY When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" - name: "logs" in: "query" description: | Replay previous logs from the container. This is useful for attaching to a container that has started and you want to output everything since the container started. If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. type: "boolean" default: false - name: "stream" in: "query" description: | Stream attached streams from the time the request was made onwards. type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/attach/ws: get: summary: "Attach to a container via a websocket" operationId: "ContainerAttachWebsocket" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`. type: "string" - name: "logs" in: "query" description: "Return logs" type: "boolean" default: false - name: "stream" in: "query" description: "Return stream" type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/wait: post: summary: "Wait for a container" description: "Block until a container stops, then returns the exit code." 
operationId: "ContainerWait" produces: ["application/json"] responses: 200: description: "The container has exit." schema: type: "object" title: "ContainerWaitResponse" description: "OK response to ContainerWait operation" required: [StatusCode] properties: StatusCode: description: "Exit code of the container" type: "integer" x-nullable: false Error: description: "container waiting error, if any" type: "object" properties: Message: description: "Details of an error" type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "condition" in: "query" description: | Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'. type: "string" default: "not-running" tags: ["Container"] /containers/{id}: delete: summary: "Remove a container" operationId: "ContainerDelete" responses: 204: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: | You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "v" in: "query" description: "Remove anonymous volumes associated with the container." type: "boolean" default: false - name: "force" in: "query" description: "If the container is running, kill it before removing it." type: "boolean" default: false - name: "link" in: "query" description: "Remove the specified link associated with the container." type: "boolean" default: false tags: ["Container"] /containers/{id}/archive: head: summary: "Get information about files in a container" description: | A response header `X-Docker-Container-Path-Stat` is returned, containing a base64 - encoded JSON object with some filesystem header information about the path. operationId: "ContainerArchiveInfo" responses: 200: description: "no error" headers: X-Docker-Container-Path-Stat: type: "string" description: | A base64 - encoded JSON object with some filesystem header information about the path 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." 
type: "string" tags: ["Container"] get: summary: "Get an archive of a filesystem resource in a container" description: "Get a tar archive of a resource in the filesystem of container id." operationId: "ContainerArchive" produces: ["application/x-tar"] responses: 200: description: "no error" 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." type: "string" tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" description: "Upload a tar archive to be extracted to a path in the filesystem of container id." operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: 200: description: "The content was extracted successfully" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such container or path does not exist inside the container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Path to a directory in the container to extract the archive’s contents into. " type: "string" - name: "noOverwriteDirNonDir" in: "query" description: | If `1`, `true`, or `True` then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. type: "string" - name: "copyUIDGID" in: "query" description: | If `1`, `true`, then it will copy UID/GID maps to the dest file or dir type: "string" - name: "inputStream" in: "body" required: true description: | The input stream must be a tar archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, or `xz`. schema: type: "string" format: "binary" tags: ["Container"] /containers/prune: post: summary: "Delete stopped containers" produces: - "application/json" operationId: "ContainerPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ContainerPruneResponse" properties: ContainersDeleted: description: "Container IDs that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /images/json: get: summary: "List Images" description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." operationId: "ImageList" produces: - "application/json" responses: 200: description: "Summary image data for the images matching the query" schema: type: "array" items: $ref: "#/definitions/ImageSummary" examples: application/json: - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ParentId: "" RepoTags: - "ubuntu:12.04" - "ubuntu:precise" RepoDigests: - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" Created: 1474925151 Size: 103579269 VirtualSize: 103579269 SharedSize: 0 Labels: {} Containers: 2 - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" ParentId: "" RepoTags: - "ubuntu:12.10" - "ubuntu:quantal" RepoDigests: - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" Created: 1403128455 Size: 172064416 VirtualSize: 172064416 SharedSize: 0 Labels: {} Containers: 5 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "all" in: "query" description: "Show all images. Only images from a final layer (no children) are shown by default." type: "boolean" default: false - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - `dangling=true` - `label=key` or `label="key=value"` of an image label - `reference`=(`<image-name>[:<tag>]`) - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) type: "string" - name: "shared-size" in: "query" description: "Compute and show shared size as a `SharedSize` field on each image." type: "boolean" default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false tags: ["Image"] /build: post: summary: "Build an image" description: | Build an image from a tar archive with a `Dockerfile` in it. The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. The build is canceled if the client drops the connection by quitting or being killed. 
operationId: "ImageBuild" consumes: - "application/octet-stream" produces: - "application/json" parameters: - name: "inputStream" in: "body" description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." schema: type: "string" format: "binary" - name: "dockerfile" in: "query" description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." type: "string" default: "Dockerfile" - name: "t" in: "query" description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." type: "string" - name: "extrahosts" in: "query" description: "Extra hosts to add to /etc/hosts" type: "string" - name: "remote" in: "query" description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." type: "string" - name: "q" in: "query" description: "Suppress verbose build output." type: "boolean" default: false - name: "nocache" in: "query" description: "Do not use the cache when building the image." type: "boolean" default: false - name: "cachefrom" in: "query" description: "JSON array of images used for build cache resolution." type: "string" - name: "pull" in: "query" description: "Attempt to pull the image even if an older image exists locally." type: "string" - name: "rm" in: "query" description: "Remove intermediate containers after a successful build." type: "boolean" default: true - name: "forcerm" in: "query" description: "Always remove intermediate containers, even upon failure." type: "boolean" default: false - name: "memory" in: "query" description: "Set memory limit for build." type: "integer" - name: "memswap" in: "query" description: "Total memory (memory + swap). Set as `-1` to disable swap." type: "integer" - name: "cpushares" in: "query" description: "CPU shares (relative weight)." type: "integer" - name: "cpusetcpus" in: "query" description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." type: "string" - name: "cpuperiod" in: "query" description: "The length of a CPU period in microseconds." type: "integer" - name: "cpuquota" in: "query" description: "Microseconds of CPU time that the container can get in a CPU period." type: "integer" - name: "buildargs" in: "query" description: > JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) type: "string" - name: "shmsize" in: "query" description: "Size of `/dev/shm` in bytes. The size must be greater than 0. 
If omitted the system uses 64MB." type: "integer" - name: "squash" in: "query" description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" type: "boolean" - name: "labels" in: "query" description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." type: "string" - name: "networkmode" in: "query" description: | Sets the networking mode for the run commands during build. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name or ID to which this container should connect to. type: "string" - name: "Content-type" in: "header" type: "string" enum: - "application/x-tar" default: "application/x-tar" - name: "X-Registry-Config" in: "header" description: | This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: ``` { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } ``` Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" - name: "target" in: "query" description: "Target build stage" type: "string" default: "" - name: "outputs" in: "query" description: "BuildKit output configuration" type: "string" default: "" responses: 200: description: "no error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /build/prune: post: summary: "Delete builder cache" produces: - "application/json" operationId: "BuildPrune" parameters: - name: "keep-storage" in: "query" description: "Amount of disk space in bytes to keep for cache" type: "integer" format: "int64" - name: "all" in: "query" type: "boolean" description: "Remove all types of build cache" - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - `id=<id>` - `parent=<id>` - `type=<string>` - `description=<string>` - `inuse` - `shared` - `private` responses: 200: description: "No error" schema: type: "object" title: "BuildPruneResponse" properties: CachesDeleted: type: "array" items: description: "ID of build cache object" type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /images/create: post: summary: "Create an image" description: "Create an image by either pulling it from a registry or importing it." 
operationId: "ImageCreate" consumes: - "text/plain" - "application/octet-stream" produces: - "application/json" responses: 200: description: "no error" 404: description: "repository does not exist or no read access" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "fromImage" in: "query" description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." type: "string" - name: "fromSrc" in: "query" description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." type: "string" - name: "repo" in: "query" description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." type: "string" - name: "tag" in: "query" description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." type: "string" - name: "message" in: "query" description: "Set commit message for imported image." type: "string" - name: "inputImage" in: "body" description: "Image content if the value `-` has been specified in fromSrc query parameter" schema: type: "string" required: false - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "changes" in: "query" description: | Apply `Dockerfile` instructions to the image that is created, for example: `changes=ENV DEBUG=true`. Note that `ENV DEBUG=true` should be URI component encoded. Supported `Dockerfile` instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` type: "array" items: type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" tags: ["Image"] /images/{name}/json: get: summary: "Inspect an image" description: "Return low-level information about an image." 
operationId: "ImageInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Image" examples: application/json: Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" Comment: "" Os: "linux" Architecture: "amd64" Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" ContainerConfig: Tty: false Hostname: "e611e15f9c9d" Domainname: "" AttachStdout: false PublishService: "" AttachStdin: false OpenStdin: false StdinOnce: false NetworkDisabled: false OnBuild: [] Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" User: "" WorkingDir: "" MacAddress: "" AttachStderr: false Labels: com.example.license: "GPL" com.example.version: "1.0" com.example.vendor: "Acme" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: - "/bin/sh" - "-c" - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" DockerVersion: "1.9.0-dev" VirtualSize: 188359297 Size: 0 Author: "" Created: "2015-09-10T08:30:53.26995814Z" GraphDriver: Name: "aufs" Data: {} RepoDigests: - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" RepoTags: - "example:1.0" - "example:latest" - "example:stable" Config: Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" NetworkDisabled: false OnBuild: [] StdinOnce: false PublishService: "" AttachStdin: false OpenStdin: false Domainname: "" AttachStdout: false Tty: false Hostname: "e611e15f9c9d" Cmd: - "/bin/bash" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Labels: com.example.vendor: "Acme" com.example.version: "1.0" com.example.license: "GPL" MacAddress: "" AttachStderr: false WorkingDir: "" User: "" RootFS: Type: "layers" Layers: - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Image"] /images/{name}/history: get: summary: "Get the history of an image" description: "Return parent layers of an image." 
operationId: "ImageHistory" produces: ["application/json"] responses: 200: description: "List of image layers" schema: type: "array" items: type: "object" x-go-name: HistoryResponseItem title: "HistoryResponseItem" description: "individual image layer information in response to ImageHistory operation" required: [Id, Created, CreatedBy, Tags, Size, Comment] properties: Id: type: "string" x-nullable: false Created: type: "integer" format: "int64" x-nullable: false CreatedBy: type: "string" x-nullable: false Tags: type: "array" items: type: "string" Size: type: "integer" format: "int64" x-nullable: false Comment: type: "string" x-nullable: false examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" Created: 1398108230 CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" Tags: - "ubuntu:lucid" - "ubuntu:10.04" Size: 182964289 Comment: "" - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" Created: 1398108222 CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <[email protected]> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" Tags: [] Size: 0 Comment: "" - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" Created: 1371157430 CreatedBy: "" Tags: - "scratch12:latest" - "scratch:latest" Size: 0 Comment: "Imported from -" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/{name}/push: post: summary: "Push an image" description: | Push an image to a registry. If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. The push is cancelled if the HTTP connection is closed. operationId: "ImagePush" consumes: - "application/octet-stream" responses: 200: description: "No error" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID." type: "string" required: true - name: "tag" in: "query" description: "The tag to associate with the image on the registry." type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" required: true tags: ["Image"] /images/{name}/tag: post: summary: "Tag an image" description: "Tag an image so that it becomes part of a repository." operationId: "ImageTag" responses: 201: description: "No error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID to tag." type: "string" required: true - name: "repo" in: "query" description: "The repository to tag in. For example, `someuser/someimage`." type: "string" - name: "tag" in: "query" description: "The name of the new tag." 
type: "string" tags: ["Image"] /images/{name}: delete: summary: "Remove an image" description: | Remove an image, along with any untagged parent images that were referenced by that image. Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. operationId: "ImageDelete" produces: ["application/json"] responses: 200: description: "The image was deleted successfully" schema: type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" examples: application/json: - Untagged: "3e2f21a89f" - Deleted: "3e2f21a89f" - Deleted: "53b4f83ac9" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true - name: "force" in: "query" description: "Remove the image even if it is being used by stopped containers or has other tags" type: "boolean" default: false - name: "noprune" in: "query" description: "Do not delete untagged parent images" type: "boolean" default: false tags: ["Image"] /images/search: get: summary: "Search images" description: "Search for an image on Docker Hub." operationId: "ImageSearch" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: type: "object" title: "ImageSearchResponseItem" properties: description: type: "string" is_official: type: "boolean" is_automated: type: "boolean" name: type: "string" star_count: type: "integer" examples: application/json: - description: "" is_official: false is_automated: false name: "wma55/u1210sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "jdswinbank/sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "vgauthier/sshd" star_count: 0 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "term" in: "query" description: "Term to search" type: "string" required: true - name: "limit" in: "query" description: "Maximum number of results to return" type: "integer" - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `is-automated=(true|false)` - `is-official=(true|false)` - `stars=<number>` Matches images that has at least 'number' stars. type: "string" tags: ["Image"] /images/prune: post: summary: "Delete unused images" produces: - "application/json" operationId: "ImagePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `dangling=<boolean>` When set to `true` (or `1`), prune only unused *and* untagged images. When set to `false` (or `0`), all unused images are pruned. - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ImagePruneResponse" properties: ImagesDeleted: description: "Images that were deleted" type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /auth: post: summary: "Check auth configuration" description: | Validate credentials for a registry and, if available, get an identity token for accessing the registry without password. operationId: "SystemAuth" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "An identity token was generated successfully." schema: type: "object" title: "SystemAuthResponse" required: [Status] properties: Status: description: "The status of the authentication" type: "string" x-nullable: false IdentityToken: description: "An opaque token used to authenticate a user after a successful login" type: "string" x-nullable: false examples: application/json: Status: "Login Succeeded" IdentityToken: "9cbaf023786cd7..." 204: description: "No error" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "authConfig" in: "body" description: "Authentication to check" schema: $ref: "#/definitions/AuthConfig" tags: ["System"] /info: get: summary: "Get system information" operationId: "SystemInfo" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/SystemInfo" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /version: get: summary: "Get version" description: "Returns the version of Docker that is running and various information about the system that Docker is running on." operationId: "SystemVersion" produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/SystemVersion" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /_ping: get: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." operationId: "SystemPing" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "OK" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" headers: Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" tags: ["System"] head: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." 
operationId: "SystemPingHead" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "(empty)" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /commit: post: summary: "Create a new image from a container" operationId: "ImageCommit" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "containerConfig" in: "body" description: "The container configuration" schema: $ref: "#/definitions/ContainerConfig" - name: "container" in: "query" description: "The ID or name of the container to commit" type: "string" - name: "repo" in: "query" description: "Repository name for the created image" type: "string" - name: "tag" in: "query" description: "Tag name for the create image" type: "string" - name: "comment" in: "query" description: "Commit message" type: "string" - name: "author" in: "query" description: "Author of the image (e.g., `John Hannibal Smith <[email protected]>`)" type: "string" - name: "pause" in: "query" description: "Whether to pause the container before committing" type: "boolean" default: true - name: "changes" in: "query" description: "`Dockerfile` instructions to apply while committing" type: "string" tags: ["Image"] /events: get: summary: "Monitor events" description: | Stream real-time events from the server. Various objects within Docker report events when something happens to them. Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` The Docker daemon reports these events: `reload` Services report these events: `create`, `update`, and `remove` Nodes report these events: `create`, `update`, and `remove` Secrets report these events: `create`, `update`, and `remove` Configs report these events: `create`, `update`, and `remove` The Builder reports `prune` events operationId: "SystemEvents" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/EventMessage" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "since" in: "query" description: "Show events created since this timestamp then stream new events." 
type: "string" - name: "until" in: "query" description: "Show events created until this timestamp then stop streaming." type: "string" - name: "filters" in: "query" description: | A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: - `config=<string>` config name or ID - `container=<string>` container name or ID - `daemon=<string>` daemon name or ID - `event=<string>` event type - `image=<string>` image name or ID - `label=<string>` image or container label - `network=<string>` network name or ID - `node=<string>` node ID - `plugin`=<string> plugin name or ID - `scope`=<string> local or swarm - `secret=<string>` secret name or ID - `service=<string>` service name or ID - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - `volume=<string>` volume name type: "string" tags: ["System"] /system/df: get: summary: "Get data usage information" operationId: "SystemDataUsage" responses: 200: description: "no error" schema: type: "object" title: "SystemDataUsageResponse" properties: LayersSize: type: "integer" format: "int64" Images: type: "array" items: $ref: "#/definitions/ImageSummary" Containers: type: "array" items: $ref: "#/definitions/ContainerSummary" Volumes: type: "array" items: $ref: "#/definitions/Volume" BuildCache: type: "array" items: $ref: "#/definitions/BuildCache" example: LayersSize: 1092588 Images: - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" ParentId: "" RepoTags: - "busybox:latest" RepoDigests: - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" Created: 1466724217 Size: 1092588 SharedSize: 0 VirtualSize: 1092588 Labels: {} Containers: 1 Containers: - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" Names: - "/top" Image: "busybox" ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" Command: "top" Created: 1472592424 Ports: [] SizeRootFs: 1092588 Labels: {} State: "exited" Status: "Exited (0) 56 minutes ago" HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: IPAMConfig: null Links: null Aliases: null NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" Gateway: "172.18.0.1" IPAddress: "172.18.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Mounts: [] Volumes: - Name: "my-volume" Driver: "local" Mountpoint: "/var/lib/docker/volumes/my-volume/_data" Labels: null Scope: "local" Options: null UsageData: Size: 10920104 RefCount: 2 BuildCache: - ID: "hw53o5aio51xtltp5xjp8v7fx" Parent: "" Type: "regular" Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" InUse: false Shared: true Size: 0 CreatedAt: "2021-06-28T13:31:01.474619385Z" LastUsedAt: "2021-07-07T22:02:32.738075951Z" UsageCount: 26 - ID: "ndlpt0hhvkqcdfkputsk4cq9c" Parent: "hw53o5aio51xtltp5xjp8v7fx" Type: "regular" Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: false Shared: true Size: 51 CreatedAt: "2021-06-28T13:31:03.002625487Z" LastUsedAt: "2021-07-07T22:02:32.773909517Z" UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "type" in: "query" 
description: | Object types, for which to compute and return data. type: "array" collectionFormat: multi items: type: "string" enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: summary: "Export an image" description: | Get a tarball containing all images and metadata for a repository. If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ```json { "hello-world": { "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" } } ``` operationId: "ImageGet" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/get: get: summary: "Export several images" description: | Get a tarball containing all images and metadata for several image repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageGetAll" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "names" in: "query" description: "Image names to filter by" type: "array" items: type: "string" tags: ["Image"] /images/load: post: summary: "Import images" description: | Load a set of images and tags into a repository. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageLoad" consumes: - "application/x-tar" produces: - "application/json" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "imagesTarball" in: "body" description: "Tar archive containing images" schema: type: "string" format: "binary" - name: "quiet" in: "query" description: "Suppress progress details during load." type: "boolean" default: false tags: ["Image"] /containers/{id}/exec: post: summary: "Create an exec instance" description: "Run a command inside a running container." 
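The export and import endpoints above move image tarballs in the format described there. The sketch below saves one repository to disk and re-imports it; it assumes a TCP-exposed daemon and uses `hello-world:latest` purely as a placeholder.

```python
import requests

BASE = "http://localhost:2375"  # assumption: daemon exposed over TCP

# GET /images/{name}/get streams the tarball described above.
with requests.get(f"{BASE}/images/hello-world:latest/get", stream=True) as resp:
    resp.raise_for_status()
    with open("hello-world.tar", "wb") as f:
        for chunk in resp.iter_content(chunk_size=1 << 20):
            f.write(chunk)

# POST /images/load re-imports the tarball; ?quiet=true suppresses progress detail.
with open("hello-world.tar", "rb") as f:
    resp = requests.post(
        f"{BASE}/images/load",
        params={"quiet": "true"},
        data=f,
        headers={"Content-Type": "application/x-tar"},
    )
resp.raise_for_status()
```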
operationId: "ContainerExec" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is paused" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execConfig" in: "body" description: "Exec configuration" schema: type: "object" title: "ExecConfig" properties: AttachStdin: type: "boolean" description: "Attach to `stdin` of the exec command." AttachStdout: type: "boolean" description: "Attach to `stdout` of the exec command." AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." DetachKeys: type: "string" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Tty: type: "boolean" description: "Allocate a pseudo-TTY." Env: description: | A list of environment variables in the form `["VAR=value", ...]`. type: "array" items: type: "string" Cmd: type: "array" description: "Command to run, as a string or array of strings." items: type: "string" Privileged: type: "boolean" description: "Runs the exec process with extended privileges." default: false User: type: "string" description: | The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`. WorkingDir: type: "string" description: | The working directory for the exec process inside the container. example: AttachStdin: false AttachStdout: true AttachStderr: true DetachKeys: "ctrl-p,ctrl-q" Tty: false Cmd: - "date" Env: - "FOO=bar" - "BAZ=quux" required: true - name: "id" in: "path" description: "ID or name of container" type: "string" required: true tags: ["Exec"] /exec/{id}/start: post: summary: "Start an exec instance" description: | Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command. operationId: "ExecStart" consumes: - "application/json" produces: - "application/vnd.docker.raw-stream" responses: 200: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Container is stopped or paused" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execStartConfig" in: "body" schema: type: "object" title: "ExecStartConfig" properties: Detach: type: "boolean" description: "Detach from the command." Tty: type: "boolean" description: "Allocate a pseudo-TTY." example: Detach: false Tty: false - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /exec/{id}/resize: post: summary: "Resize an exec instance" description: | Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance. 
operationId: "ExecResize" responses: 201: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] /exec/{id}/json: get: summary: "Inspect an exec instance" description: "Return low-level information about an exec instance." operationId: "ExecInspect" produces: - "application/json" responses: 200: description: "No error" schema: type: "object" title: "ExecInspectResponse" properties: CanRemove: type: "boolean" DetachKeys: type: "string" ID: type: "string" Running: type: "boolean" ExitCode: type: "integer" ProcessConfig: $ref: "#/definitions/ProcessConfig" OpenStdin: type: "boolean" OpenStderr: type: "boolean" OpenStdout: type: "boolean" ContainerID: type: "string" Pid: type: "integer" description: "The system process ID for the exec process." examples: application/json: CanRemove: false ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" DetachKeys: "" ExitCode: 2 ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" OpenStderr: true OpenStdin: true OpenStdout: true ProcessConfig: arguments: - "-c" - "exit 2" entrypoint: "sh" privileged: false tty: true user: "1000" Running: false Pid: 42000 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /volumes: get: summary: "List volumes" operationId: "VolumeList" produces: ["application/json"] responses: 200: description: "Summary volume data that matches the query" schema: type: "object" title: "VolumeListResponse" description: "Volume list response" required: [Volumes, Warnings] properties: Volumes: type: "array" x-nullable: false description: "List of volumes" items: $ref: "#/definitions/Volume" Warnings: type: "array" x-nullable: false description: | Warnings that occurred when fetching the list of volumes. items: type: "string" examples: application/json: Volumes: - CreatedAt: "2017-07-19T12:00:26Z" Name: "tardis" Driver: "local" Mountpoint: "/var/lib/docker/volumes/tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" Options: device: "tmpfs" o: "size=100m,uid=1000" type: "tmpfs" Warnings: [] 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are not in use by a container. When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. - `driver=<volume-driver-name>` Matches volumes based on their driver. - `label=<key>` or `label=<key>:<value>` Matches volumes based on the presence of a `label` alone or a `label` and a value. - `name=<volume-name>` Matches all or part of a volume name. 
type: "string" format: "json" tags: ["Volume"] /volumes/create: post: summary: "Create a volume" operationId: "VolumeCreate" consumes: ["application/json"] produces: ["application/json"] responses: 201: description: "The volume was created successfully" schema: $ref: "#/definitions/Volume" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "volumeConfig" in: "body" required: true description: "Volume configuration" schema: type: "object" description: "Volume configuration" title: "VolumeConfig" properties: Name: description: | The new volume's name. If not specified, Docker generates a name. type: "string" x-nullable: false Driver: description: "Name of the volume driver to use." type: "string" default: "local" x-nullable: false DriverOpts: description: | A mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: Name: "tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Driver: "custom" tags: ["Volume"] /volumes/{name}: get: summary: "Inspect a volume" operationId: "VolumeInspect" produces: ["application/json"] responses: 200: description: "No error" schema: $ref: "#/definitions/Volume" 404: description: "No such volume" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" tags: ["Volume"] delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." operationId: "VolumeDelete" responses: 204: description: "The volume was removed" 404: description: "No such volume or volume driver" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Volume is in use and cannot be removed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" - name: "force" in: "query" description: "Force the removal of the volume" type: "boolean" default: false tags: ["Volume"] /volumes/prune: post: summary: "Delete unused volumes" produces: - "application/json" operationId: "VolumePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: description: "No error" schema: type: "object" title: "VolumePruneResponse" properties: VolumesDeleted: description: "Volumes that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Volume"] /networks: get: summary: "List networks" description: | Returns a list of networks. For details on the format, see the [network inspect endpoint](#operation/NetworkInspect). Note that it uses a different, smaller representation of a network than inspecting a single network. 
For example, the list of containers attached to the network is not propagated in API versions 1.28 and up. operationId: "NetworkList" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Network" examples: application/json: - Name: "bridge" Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" Created: "2016-10-19T06:21:00.416543526Z" Scope: "local" Driver: "bridge" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: - Subnet: "172.17.0.0/16" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" - Name: "none" Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "null" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} - Name: "host" Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "host" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all networks that are not in use by a container. When set to `false` (or `0`), only networks that are in use by one or more containers are returned. - `driver=<driver-name>` Matches a network's driver. - `id=<network-id>` Matches all or part of a network ID. - `label=<key>` or `label=<key>=<value>` of a network label. - `name=<network-name>` Matches all or part of a network name. - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
type: "string" tags: ["Network"] /networks/{id}: get: summary: "Inspect a network" operationId: "NetworkInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Network" 404: description: "Network not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "verbose" in: "query" description: "Detailed inspect output for troubleshooting" type: "boolean" default: false - name: "scope" in: "query" description: "Filter the network by scope (swarm, global, or local)" type: "string" tags: ["Network"] delete: summary: "Remove a network" operationId: "NetworkDelete" responses: 204: description: "No error" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such network" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" tags: ["Network"] /networks/create: post: summary: "Create a network" operationId: "NetworkCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "No error" schema: type: "object" title: "NetworkCreateResponse" properties: Id: description: "The ID of the created network." type: "string" Warning: type: "string" example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "plugin not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "networkConfig" in: "body" description: "Network configuration" required: true schema: type: "object" title: "NetworkCreateRequest" required: ["Name"] properties: Name: description: "The network's name." type: "string" CheckDuplicate: description: | Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions. type: "boolean" Driver: description: "Name of the network driver plugin to use." type: "string" default: "bridge" Internal: description: "Restrict external access to the network." type: "boolean" Attachable: description: | Globally scoped network is manually attachable by regular containers from workers in swarm mode. type: "boolean" Ingress: description: | Ingress network is the network which provides the routing-mesh in swarm mode. type: "boolean" IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" Options: description: "Network specific options to be used by the drivers." type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: Name: "isolated_nw" CheckDuplicate: false Driver: "bridge" EnableIPv6: true IPAM: Driver: "default" Config: - Subnet: "172.20.0.0/16" IPRange: "172.20.10.0/24" Gateway: "172.20.10.11" - Subnet: "2001:db8:abcd::/64" Gateway: "2001:db8:abcd::1011" Options: foo: "bar" Internal: true Attachable: false Ingress: false Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" tags: ["Network"] /networks/{id}/connect: post: summary: "Connect a container to a network" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkConnectRequest" properties: Container: type: "string" description: "The ID or name of the container to connect to the network." EndpointConfig: $ref: "#/definitions/EndpointSettings" example: Container: "3613f73ba0e4" EndpointConfig: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" tags: ["Network"] /networks/{id}/disconnect: post: summary: "Disconnect a container from a network" operationId: "NetworkDisconnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkDisconnectRequest" properties: Container: type: "string" description: | The ID or name of the container to disconnect from the network. Force: type: "boolean" description: | Force the container to disconnect from the network. tags: ["Network"] /networks/prune: post: summary: "Delete unused networks" produces: - "application/json" operationId: "NetworkPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "NetworkPruneResponse" properties: NetworksDeleted: description: "Networks that were deleted" type: "array" items: type: "string" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Network"] /plugins: get: summary: "List plugins" operationId: "PluginList" description: "Returns information about installed plugins." produces: ["application/json"] responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Plugin" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - `capability=<capability name>` - `enable=<true>|<false>` tags: ["Plugin"] /plugins/privileges: get: summary: "Get plugin privileges" operationId: "GetPluginPrivileges" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: - "Plugin" /plugins/pull: post: summary: "Install a plugin" operationId: "PluginPull" description: | Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). produces: - "application/json" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | Remote reference for plugin to install. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "name" in: "query" description: | Local name for the pulled plugin. The `:latest` tag is optional, and is used as the default if omitted. required: false type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/{name}/json: get: summary: "Inspect a plugin" operationId: "PluginInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" tags: ["Plugin"] /plugins/{name}: delete: summary: "Remove a plugin" operationId: "PluginDelete" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "force" in: "query" description: | Disable the plugin before removing. This may result in issues if the plugin is in use by a container. type: "boolean" default: false tags: ["Plugin"] /plugins/{name}/enable: post: summary: "Enable a plugin" operationId: "PluginEnable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "timeout" in: "query" description: "Set the HTTP client timeout (in seconds)" type: "integer" default: 0 tags: ["Plugin"] /plugins/{name}/disable: post: summary: "Disable a plugin" operationId: "PluginDisable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: ["Plugin"] /plugins/{name}/upgrade: post: summary: "Upgrade a plugin" operationId: "PluginUpgrade" responses: 204: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "remote" in: "query" description: | Remote reference to upgrade to. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/create: post: summary: "Create a plugin" operationId: "PluginCreate" consumes: - "application/x-tar" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" - name: "tarContext" in: "body" description: "Path to tar containing plugin rootfs and manifest" schema: type: "string" format: "binary" tags: ["Plugin"] /plugins/{name}/push: post: summary: "Push a plugin" operationId: "PluginPush" description: | Push a plugin to the registry. parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" responses: 200: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /plugins/{name}/set: post: summary: "Configure a plugin" operationId: "PluginSet" consumes: - "application/json" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "body" in: "body" schema: type: "array" items: type: "string" example: ["DEBUG=1"] responses: 204: description: "No error" 404: description: "Plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /nodes: get: summary: "List nodes" operationId: "NodeList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Node" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). Available filters: - `id=<node id>` - `label=<engine label>` - `membership=`(`accepted`|`pending`)` - `name=<node name>` - `node.label=<node label>` - `role=`(`manager`|`worker`)` type: "string" tags: ["Node"] /nodes/{id}: get: summary: "Inspect a node" operationId: "NodeInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Node" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true tags: ["Node"] delete: summary: "Delete a node" operationId: "NodeDelete" responses: 200: description: "no error" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true - name: "force" in: "query" description: "Force remove a node from the swarm" default: false type: "boolean" tags: ["Node"] /nodes/{id}/update: post: summary: "Update a node" operationId: "NodeUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID of 
the node" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/NodeSpec" - name: "version" in: "query" description: | The version number of the node object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Node"] /swarm: get: summary: "Inspect swarm" operationId: "SwarmInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Swarm" 404: description: "no such swarm" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/init: post: summary: "Initialize a new swarm" operationId: "SwarmInit" produces: - "application/json" - "text/plain" responses: 200: description: "no error" schema: description: "The node ID" type: "string" example: "7v2t30z9blmxuhnyo6s4cpenp" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmInitRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used. type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. if no port is set or is set to 0, default port 4789 will be used. type: "integer" format: "uint32" DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" example: ["10.10.0.0/16", "20.20.0.0/16"] ForceNewCluster: description: "Force creation of a new swarm." type: "boolean" SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. 
type: "integer" format: "uint32" Spec: $ref: "#/definitions/SwarmSpec" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" DataPathPort: 4789 DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] SubnetSize: 24 ForceNewCluster: false Spec: Orchestration: {} Raft: {} Dispatcher: {} CAConfig: {} EncryptionConfig: AutoLockManagers: false tags: ["Swarm"] /swarm/join: post: summary: "Join an existing swarm" operationId: "SwarmJoin" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmJoinRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" RemoteAddrs: description: | Addresses of manager nodes already participating in the swarm. type: "array" items: type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" RemoteAddrs: - "node1:2377" JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" tags: ["Swarm"] /swarm/leave: post: summary: "Leave a swarm" operationId: "SwarmLeave" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "force" description: | Force leave swarm, even if this is the last manager or that it will break the cluster. in: "query" type: "boolean" default: false tags: ["Swarm"] /swarm/update: post: summary: "Update a swarm" operationId: "SwarmUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: $ref: "#/definitions/SwarmSpec" - name: "version" in: "query" description: | The version number of the swarm object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true - name: "rotateWorkerToken" in: "query" description: "Rotate the worker join token." type: "boolean" default: false - name: "rotateManagerToken" in: "query" description: "Rotate the manager join token." type: "boolean" default: false - name: "rotateManagerUnlockKey" in: "query" description: "Rotate the manager unlock key." type: "boolean" default: false tags: ["Swarm"] /swarm/unlockkey: get: summary: "Get the unlock key" operationId: "SwarmUnlockkey" consumes: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "UnlockKeyResponse" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/unlock: post: summary: "Unlock a locked manager" operationId: "SwarmUnlock" consumes: - "application/json" produces: - "application/json" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /services: get: summary: "List services" operationId: "ServiceList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Service" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - `id=<service id>` - `label=<service label>` - `mode=["replicated"|"global"]` - `name=<service name>` - name: "status" in: "query" type: "boolean" description: | Include service status, with count of running and desired tasks. tags: ["Service"] /services/create: post: summary: "Create a service" operationId: "ServiceCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: type: "object" title: "ServiceCreateResponse" properties: ID: description: "The ID of the created service." 
type: "string" Warning: description: "Optional warning message" type: "string" example: ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "network is not eligible for services" schema: $ref: "#/definitions/ErrorResponse" 409: description: "name conflicts with an existing service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "web" TaskTemplate: ContainerSpec: Image: "nginx:alpine" Mounts: - ReadOnly: true Source: "web-data" Target: "/usr/share/nginx/html" Type: "volume" VolumeOptions: DriverConfig: {} Labels: com.example.something: "something-value" Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] User: "33" DNSConfig: Nameservers: ["8.8.8.8"] Search: ["example.org"] Options: ["timeout:3"] Secrets: - File: Name: "www.example.org.key" UID: "33" GID: "33" Mode: 384 SecretID: "fpjqlhnwb19zds35k8wn80lq9" SecretName: "example_org_domain_key" LogDriver: Name: "json-file" Options: max-file: "3" max-size: "10M" Placement: {} Resources: Limits: MemoryBytes: 104857600 Reservations: {} RestartPolicy: Condition: "on-failure" Delay: 10000000000 MaxAttempts: 10 Mode: Replicated: Replicas: 4 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Ports: - Protocol: "tcp" PublishedPort: 8080 TargetPort: 80 Labels: foo: "bar" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}: get: summary: "Inspect a service" operationId: "ServiceInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Service" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "insertDefaults" in: "query" description: "Fill empty fields with default values." type: "boolean" default: false tags: ["Service"] delete: summary: "Delete a service" operationId: "ServiceDelete" responses: 200: description: "no error" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." 
required: true type: "string" tags: ["Service"] /services/{id}/update: post: summary: "Update a service" operationId: "ServiceUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/ServiceUpdateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "top" TaskTemplate: ContainerSpec: Image: "busybox" Args: - "top" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" - name: "version" in: "query" description: | The version number of the service object being updated. This is required to avoid conflicting writes. This version number should be the value as currently set on the service *before* the update. You can find the current version by calling `GET /services/{id}` required: true type: "integer" - name: "registryAuthFrom" in: "query" description: | If the `X-Registry-Auth` header is not specified, this parameter indicates where to find registry authorization credentials. type: "string" enum: ["spec", "previous-spec"] default: "spec" - name: "rollback" in: "query" description: | Set to this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case. type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}/logs: get: summary: "Get service logs" description: | Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "ServiceLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such service: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the service" type: "string" - name: "details" in: "query" description: "Show service context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Service"] /tasks: get: summary: "List tasks" operationId: "TaskList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Task" example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" - ID: "1yljwbmlr8er2waf8orvqpwms" Version: Index: 30 CreatedAt: "2016-06-07T21:07:30.019104782Z" UpdatedAt: "2016-06-07T21:07:30.231958098Z" Name: "hopeful_cori" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:30.202183143Z" State: "shutdown" Message: "shutdown" ContainerStatus: ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" DesiredState: "shutdown" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.5/16" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: - `desired-state=(running | shutdown | accepted)` - `id=<task id>` - `label=key` or `label="key=value"` - `name=<task name>` - `node=<node id or name>` - `service=<service name>` tags: ["Task"] /tasks/{id}: get: summary: "Inspect a task" operationId: "TaskInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Task" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID of the task" required: true type: "string" tags: ["Task"] /tasks/{id}/logs: get: summary: "Get task logs" description: | Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such task: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID of the task" type: "string" - name: "details" in: "query" description: "Show task context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Task"] /secrets: get: summary: "List secrets" operationId: "SecretList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Secret" example: - ID: "blt1owaxmitz71s9v5zh81zun" Version: Index: 85 CreatedAt: "2017-07-20T13:55:28.678958722Z" UpdatedAt: "2017-07-20T13:55:28.678958722Z" Spec: Name: "mysql-passwd" Labels: some.label: "some.value" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. 
Available filters: - `id=<secret id>` - `label=<key> or label=<key>=value` - `name=<secret name>` - `names=<secret name>` tags: ["Secret"] /secrets/create: post: summary: "Create a secret" operationId: "SecretCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/SecretSpec" - type: "object" example: Name: "app-key.crt" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" tags: ["Secret"] /secrets/{id}: get: summary: "Inspect a secret" operationId: "SecretInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Secret" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] delete: summary: "Delete a secret" operationId: "SecretDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] /secrets/{id}/update: post: summary: "Update a Secret" operationId: "SecretUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such secret" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the secret" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/SecretSpec" description: | The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values. - name: "version" in: "query" description: | The version number of the secret object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true tags: ["Secret"] /configs: get: summary: "List configs" operationId: "ConfigList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Config" example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "server.conf" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: - `id=<config id>` - `label=<key> or label=<key>=value` - `name=<config name>` - `names=<config name>` tags: ["Config"] /configs/create: post: summary: "Create a config" operationId: "ConfigCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/ConfigSpec" - type: "object" example: Name: "server.conf" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" tags: ["Config"] /configs/{id}: get: summary: "Inspect a config" operationId: "ConfigInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Config" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] delete: summary: "Delete a config" operationId: "ConfigDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] /configs/{id}/update: post: summary: "Update a Config" operationId: "ConfigUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such config" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the config" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/ConfigSpec" description: | The spec of the config to update. 
Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values. - name: "version" in: "query" description: | The version number of the config object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Config"] /distribution/{name}/json: get: summary: "Get image information from the registry" description: | Return image digest and platform information by contacting the registry. operationId: "DistributionInspect" produces: - "application/json" responses: 200: description: "descriptor and platform information" schema: $ref: "#/definitions/DistributionInspect" 401: description: "Failed authentication or no image found" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Distribution"] /session: post: summary: "Initialize interactive session" description: | Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. ### Hijacking This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. For example, the client sends this request to upgrade the connection: ``` POST /session HTTP/1.1 Upgrade: h2c Connection: Upgrade ``` The Docker daemon responds with a `101 UPGRADED` response follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Connection: Upgrade Upgrade: h2c ``` operationId: "Session" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hijacking successful" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Session"]
thaJeztah
8fa6126f75f255805fb7f6ffa716d03b4cc7f76d
772e25fa9f00577ba9f6641530e5aad5ec5ff84c
Do you think it's worth having these "OCI" definitions link to the OCI specs? (like https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md, for example)
tianon
4,491
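The review comment in the record above asks whether the swagger's OCI-related definitions should link out to the OCI specifications. As a minimal, hypothetical sketch (the definition name, properties, and example values here are illustrative and not copied from swagger.yaml), a Swagger 2.0 schema can carry such a link in its description and/or in an `externalDocs` block:

```yaml
# Hypothetical definition showing two ways to point readers at the OCI spec:
# a Markdown link inside the description, and a dedicated externalDocs block.
definitions:
  OCIDescriptor:                 # illustrative name, not necessarily the real one
    type: "object"
    description: |
      A descriptor struct containing digest, media type, and size, as defined in
      the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md).
    externalDocs:
      description: "OCI Content Descriptors Specification"
      url: "https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md"
    properties:
      mediaType:
        type: "string"
        example: "application/vnd.docker.distribution.manifest.v2+json"
      digest:
        type: "string"
        example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
      size:
        type: "integer"
        format: "int64"
        example: 3987495
```

Whether to use `externalDocs`, a plain link in the description, or both is mostly a question of how ReDoc renders them; the sketch only shows that Swagger 2.0 allows either at the schema level.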
moby/moby
42,769
swagger: assorted fixes and updates
includes some follow-ups to https://github.com/moby/moby/pull/42621 - api/swagger: fix up event-types and move to definitions - api/swagger: rename PluginPrivilegeItem to PluginPrivilege - api/swagger: move DistributionInspect to definitions
null
2021-08-21 22:31:49+00:00
2021-09-02 21:23:49+00:00
api/swagger.yaml
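The PR description above mentions moving the DistributionInspect response schema into definitions. As a schematic sketch only (properties elided, not the exact diff from the PR), that kind of change replaces an inline response schema with a named, reusable definition referenced via `$ref`:

```yaml
# Schematic only: an inline response schema declared directly on the path...
paths:
  /distribution/{name}/json:
    get:
      responses:
        200:
          description: "descriptor and platform information"
          schema:
            type: "object"
            title: "DistributionInspectResponse"
            # ...properties declared inline...
---
# ...versus the same schema hoisted into definitions and referenced, so it can
# be reused and shows up as a named type in generated clients and docs.
paths:
  /distribution/{name}/json:
    get:
      responses:
        200:
          description: "descriptor and platform information"
          schema:
            $ref: "#/definitions/DistributionInspect"
definitions:
  DistributionInspect:
    type: "object"
    # ...same properties, now defined once under definitions...
```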
# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. # # This is used for generating API documentation and the types used by the # client/server. See api/README.md for more information. # # Some style notes: # - This file is used by ReDoc, which allows GitHub Flavored Markdown in # descriptions. # - There is no maximum line length, for ease of editing and pretty diffs. # - operationIds are in the format "NounVerb", with a singular noun. swagger: "2.0" schemes: - "http" - "https" produces: - "application/json" - "text/plain" consumes: - "application/json" - "text/plain" basePath: "/v1.42" info: title: "Docker Engine API" version: "1.42" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { "message": "page not found" } ``` # Versioning The API is usually changed in each release, so API calls are versioned to ensure that clients don't break. To lock to a specific version of the API, you prefix the URL with its version, for example, call `/v1.30/info` to use the v1.30 version of the `/info` endpoint. If the API version specified in the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. If you omit the version-prefix, the current version of the API (v1.42) is used. For example, calling `/info` is the same as calling `/v1.42/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer daemons. # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) (JSON) string with the following structure: ``` { "username": "string", "password": "string", "email": "string", "serveraddress": "string" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { "identitytoken": "9cbaf023786cd7..." } ``` # The tags on paths define the menu sections in the ReDoc documentation, so # the usage of tags must make sense for that: # - They should be singular, not plural. # - There should not be too many tags, or the menu becomes unwieldy. 
For # example, it is preferable to add a path to the "System" tag instead of # creating a tag with a single path in it. # - The order of tags in this list defines the order in the menu. tags: # Primary objects - name: "Container" x-displayName: "Containers" description: | Create and manage containers. - name: "Image" x-displayName: "Images" - name: "Network" x-displayName: "Networks" description: | Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/network/) for more information. - name: "Volume" x-displayName: "Volumes" description: | Create and manage persistent storage that can be attached to containers. - name: "Exec" x-displayName: "Exec" description: | Run new commands inside running containers. Refer to the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. # Swarm things - name: "Swarm" x-displayName: "Swarm" description: | Engines can be clustered together in a swarm. Refer to the [swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - name: "Node" x-displayName: "Nodes" description: | Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Service" x-displayName: "Services" description: | Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Task" x-displayName: "Tasks" description: | A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - name: "Secret" x-displayName: "Secrets" description: | Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - name: "Config" x-displayName: "Configs" description: | Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. 
# System things - name: "Plugin" x-displayName: "Plugins" - name: "System" x-displayName: "System" definitions: Port: type: "object" description: "An open port on a container" required: [PrivatePort, Type] properties: IP: type: "string" format: "ip-address" description: "Host IP address that the container's port is mapped to" PrivatePort: type: "integer" format: "uint16" x-nullable: false description: "Port on the container" PublicPort: type: "integer" format: "uint16" description: "Port exposed on the host" Type: type: "string" x-nullable: false enum: ["tcp", "udp", "sctp"] example: PrivatePort: 8080 PublicPort: 80 Type: "tcp" MountPoint: type: "object" description: "A mount point inside a container" properties: Type: type: "string" Name: type: "string" Source: type: "string" Destination: type: "string" Driver: type: "string" Mode: type: "string" RW: type: "boolean" Propagation: type: "string" DeviceMapping: type: "object" description: "A device mapping between the host and container" properties: PathOnHost: type: "string" PathInContainer: type: "string" CgroupPermissions: type: "string" example: PathOnHost: "/dev/deviceName" PathInContainer: "/dev/deviceName" CgroupPermissions: "mrw" DeviceRequest: type: "object" description: "A request for devices to be sent to device drivers" properties: Driver: type: "string" example: "nvidia" Count: type: "integer" example: -1 DeviceIDs: type: "array" items: type: "string" example: - "0" - "1" - "GPU-fef8089b-4820-abfc-e83e-94318197576e" Capabilities: description: | A list of capabilities; an OR list of AND lists of capabilities. type: "array" items: type: "array" items: type: "string" example: # gpu AND nvidia AND compute - ["gpu", "nvidia", "compute"] Options: description: | Driver-specific options, specified as a key/value pairs. These options are passed directly to the driver. type: "object" additionalProperties: type: "string" ThrottleDevice: type: "object" properties: Path: description: "Device path" type: "string" Rate: description: "Rate" type: "integer" format: "int64" minimum: 0 Mount: type: "object" properties: Target: description: "Container path." type: "string" Source: description: "Mount source (e.g. a volume name, a host path)." type: "string" Type: description: | The mount type. Available types: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. type: "string" enum: - "bind" - "volume" - "tmpfs" - "npipe" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" Consistency: description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." type: "string" BindOptions: description: "Optional configuration for the `bind` type." type: "object" properties: Propagation: description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." type: "string" enum: - "private" - "rprivate" - "shared" - "rshared" - "slave" - "rslave" NonRecursive: description: "Disable recursive bind mount." type: "boolean" default: false VolumeOptions: description: "Optional configuration for the `volume` type." 
type: "object" properties: NoCopy: description: "Populate volume with data from the target." type: "boolean" default: false Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" DriverConfig: description: "Map of driver specific options" type: "object" properties: Name: description: "Name of the driver to use to create the volume." type: "string" Options: description: "key/value map of driver specific options." type: "object" additionalProperties: type: "string" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" properties: SizeBytes: description: "The size for the tmpfs mount in bytes." type: "integer" format: "int64" Mode: description: "The permission mode for the tmpfs mount in an integer." type: "integer" RestartPolicy: description: | The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. type: "object" properties: Name: type: "string" description: | - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" - "no" - "always" - "unless-stopped" - "on-failure" MaximumRetryCount: type: "integer" description: | If `on-failure` is used, the number of times to retry before giving up. Resources: description: "A container's resources (cgroups config, ulimits, etc)" type: "object" properties: # Applicable to all platforms CpuShares: description: | An integer value representing this container's relative CPU weight versus other containers. type: "integer" Memory: description: "Memory limit in bytes." type: "integer" format: "int64" default: 0 # Applicable to UNIX platforms CgroupParent: description: | Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. type: "string" BlkioWeight: description: "Block IO weight (relative weight)." type: "integer" minimum: 0 maximum: 1000 BlkioWeightDevice: description: | Block IO weight (relative device weight) in the form: ``` [{"Path": "device_path", "Weight": weight}] ``` type: "array" items: type: "object" properties: Path: type: "string" Weight: type: "integer" minimum: 0 BlkioDeviceReadBps: description: | Limit read rate (bytes per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteBps: description: | Limit write rate (bytes per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceReadIOps: description: | Limit read rate (IO per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteIOps: description: | Limit write rate (IO per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" CpuPeriod: description: "The length of a CPU period in microseconds." 
type: "integer" format: "int64" CpuQuota: description: | Microseconds of CPU time that the container can get in a CPU period. type: "integer" format: "int64" CpuRealtimePeriod: description: | The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpuRealtimeRuntime: description: | The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpusetCpus: description: | CPUs in which to allow execution (e.g., `0-3`, `0,1`). type: "string" example: "0-3" CpusetMems: description: | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. type: "string" Devices: description: "A list of devices to add to the container." type: "array" items: $ref: "#/definitions/DeviceMapping" DeviceCgroupRules: description: "a list of cgroup rules to apply to the container" type: "array" items: type: "string" example: "c 13:* rwm" DeviceRequests: description: | A list of requests for devices to be sent to device drivers. type: "array" items: $ref: "#/definitions/DeviceRequest" KernelMemory: description: | Kernel memory limit in bytes. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "integer" format: "int64" example: 209715200 KernelMemoryTCP: description: "Hard limit for kernel TCP buffer memory (in bytes)." type: "integer" format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" format: "int64" MemorySwap: description: | Total memory limit (memory + swap). Set as `-1` to enable unlimited swap. type: "integer" format: "int64" MemorySwappiness: description: | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. type: "integer" format: "int64" minimum: 0 maximum: 100 NanoCpus: description: "CPU quota in units of 10<sup>-9</sup> CPUs." type: "integer" format: "int64" OomKillDisable: description: "Disable OOM Killer for the container." type: "boolean" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true PidsLimit: description: | Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. type: "integer" format: "int64" x-nullable: true Ulimits: description: | A list of resource limits to set in the container. For example: ``` {"Name": "nofile", "Soft": 1024, "Hard": 2048} ``` type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" # Applicable to Windows CpuCount: description: | The number of usable CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" CpuPercent: description: | The usable percentage of the available CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. 
type: "integer" format: "int64" IOMaximumIOps: description: "Maximum IOps for the container system drive (Windows only)" type: "integer" format: "int64" IOMaximumBandwidth: description: | Maximum IO in bytes per second for the container system drive (Windows only). type: "integer" format: "int64" Limit: description: | An object describing a limit on resources which can be requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 Pids: description: | Limits the maximum number of PIDs in the container. Set `0` for unlimited. type: "integer" format: "int64" default: 0 example: 100 ResourceObject: description: | An object describing the resources which can be advertised by a node and requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 GenericResources: $ref: "#/definitions/GenericResources" GenericResources: description: | User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`). type: "array" items: type: "object" properties: NamedResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "string" DiscreteResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "integer" format: "int64" example: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" HealthConfig: description: "A test to perform to check that the container is healthy." type: "object" properties: Test: description: | The test to perform. Possible values are: - `[]` inherit healthcheck from image or parent image - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell type: "array" items: type: "string" Interval: description: | The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Retries: description: | The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. type: "integer" StartPeriod: description: | Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Health: description: | Health stores information about the container's healthcheck results. 
type: "object" properties: Status: description: | Status is one of `none`, `starting`, `healthy` or `unhealthy` - "none" Indicates there is no healthcheck - "starting" Starting indicates that the container is not yet ready - "healthy" Healthy indicates that the container is running correctly - "unhealthy" Unhealthy indicates that the container has a problem type: "string" enum: - "none" - "starting" - "healthy" - "unhealthy" example: "healthy" FailingStreak: description: "FailingStreak is the number of consecutive failures" type: "integer" example: 0 Log: type: "array" description: | Log contains the last few results (oldest first) items: x-nullable: true $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" properties: Start: description: | Date and time at which this check started in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "date-time" example: "2020-01-04T10:44:24.496525531Z" End: description: | Date and time at which this check ended in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2020-01-04T10:45:21.364524523Z" ExitCode: description: | ExitCode meanings: - `0` healthy - `1` unhealthy - `2` reserved (considered unhealthy) - other values: error running probe type: "integer" example: 0 Output: description: "Output from last check" type: "string" HostConfig: description: "Container configuration that depends on the host we are running on" allOf: - $ref: "#/definitions/Resources" - type: "object" properties: # Applicable to all platforms Binds: type: "array" description: | A list of volume bindings for this container. Each volume binding is a string in one of these forms: - `host-src:container-dest[:options]` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest[:options]` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. `options` is an optional, comma-delimited list of: - `nocopy` disables automatic copying of data from the container path to the volume. The `nocopy` flag only applies to named volumes. - `[ro|rw]` mounts a volume read-only or read-write, respectively. If omitted or set to `rw`, volumes are mounted read-write. - `[z|Z]` applies SELinux labels to allow or deny multiple containers to read and write to the same volume. - `z`: a _shared_ content label is applied to the content. This label indicates that multiple containers can share the volume content, for both reading and writing. - `Z`: a _private unshared_ label is applied to the content. This label indicates that only the current container can use a private volume. Labeling systems such as SELinux require proper labels to be placed on volume content that is mounted into a container. Without a label, the security system can prevent a container's processes from using the content. By default, the labels set by the host operating system are not modified. - `[[r]shared|[r]slave|[r]private]` specifies mount [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). This only applies to bind-mounted volumes, not internal volumes or named volumes. 
Mount propagation requires the source mount point (the location where the source directory is mounted in the host operating system) to have the correct propagation properties. For shared volumes, the source mount point must be set to `shared`. For slave volumes, the mount must be set to either `shared` or `slave`. items: type: "string" ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: type: "string" enum: - "json-file" - "syslog" - "journald" - "gelf" - "fluentd" - "awslogs" - "splunk" - "etwlogs" - "none" Config: type: "object" additionalProperties: type: "string" NetworkMode: type: "string" description: | Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name to which this container should connect to. PortBindings: $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: type: "boolean" description: | Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set. VolumeDriver: type: "string" description: "Driver that this container uses to mount volumes." VolumesFrom: type: "array" description: | A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`. items: type: "string" Mounts: description: | Specification for mounts to be added to the container. type: "array" items: $ref: "#/definitions/Mount" # Applicable to UNIX platforms CapAdd: type: "array" description: | A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'. items: type: "string" CapDrop: type: "array" description: | A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'. items: type: "string" CgroupnsMode: type: "string" enum: - "private" - "host" description: | cgroup namespace mode for the container. Possible values are: - `"private"`: the container runs in its own private cgroup namespace - `"host"`: use the host system's cgroup namespace If not specified, the daemon default is used, which can either be `"private"` or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." items: type: "string" DnsOptions: type: "array" description: "A list of DNS options." items: type: "string" DnsSearch: type: "array" description: "A list of DNS search domains." items: type: "string" ExtraHosts: type: "array" description: | A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. items: type: "string" GroupAdd: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" IpcMode: type: "string" description: | IPC sharing mode for the container. Possible values are: - `"none"`: own private IPC namespace, with /dev/shm not mounted - `"private"`: own private IPC namespace - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - `"container:<name|id>"`: join another (shareable) container's IPC namespace - `"host"`: use the host system's IPC namespace If not specified, daemon default is used, which can either be `"private"` or `"shareable"`, depending on daemon version and configuration. 
Cgroup: type: "string" description: "Cgroup to use for the container." Links: type: "array" description: | A list of links for the container in the form `container_name:alias`. items: type: "string" OomScoreAdj: type: "integer" description: | An integer value containing the score given to the container in order to tune OOM killer preferences. example: 500 PidMode: type: "string" description: | Set the PID (Process) Namespace mode for the container. It can be either: - `"container:<name|id>"`: joins another container's PID namespace - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" description: "Gives the container full access to the host." PublishAllPorts: type: "boolean" description: | Allocates an ephemeral host port for all of a container's exposed ports. Ports are de-allocated when the container stops and allocated when the container starts. The allocated port might be changed when restarting the container. The port is selected from the ephemeral port range that depends on the kernel. For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" description: "A list of string values to customize labels for MLS systems, such as SELinux." items: type: "string" StorageOpt: type: "object" description: | Storage driver options for this container, in the form `{"size": "120G"}`. additionalProperties: type: "string" Tmpfs: type: "object" description: | A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: ``` { "/run": "rw,noexec,nosuid,size=65536k" } ``` additionalProperties: type: "string" UTSMode: type: "string" description: "UTS namespace to use for the container." UsernsMode: type: "string" description: | Sets the usernamespace mode for the container when usernamespace remapping option is enabled. ShmSize: type: "integer" description: | Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. minimum: 0 Sysctls: type: "object" description: | A list of kernel parameters (sysctls) to set in the container. For example: ``` {"net.ipv4.ip_forward": "1"} ``` additionalProperties: type: "string" Runtime: type: "string" description: "Runtime to use with this container." # Applicable to Windows ConsoleSize: type: "array" description: | Initial console size, as an `[height, width]` array. (Windows only) minItems: 2 maxItems: 2 items: type: "integer" minimum: 0 Isolation: type: "string" description: | Isolation technology of the container. (Windows only) enum: - "default" - "process" - "hyperv" MaskedPaths: type: "array" description: | The list of paths to be masked inside the container (this overrides the default set of paths). items: type: "string" ReadonlyPaths: type: "array" description: | The list of paths to be set as read-only inside the container (this overrides the default set of paths). items: type: "string" ContainerConfig: description: "Configuration for a container that is portable between hosts" type: "object" properties: Hostname: description: "The hostname to use for the container, as a valid RFC 1123 hostname." type: "string" Domainname: description: "The domain name to use for the container." type: "string" User: description: "The user that commands are run as inside the container." type: "string" AttachStdin: description: "Whether to attach to `stdin`." 
type: "boolean" default: false AttachStdout: description: "Whether to attach to `stdout`." type: "boolean" default: true AttachStderr: description: "Whether to attach to `stderr`." type: "boolean" default: true ExposedPorts: description: | An object mapping ports to an empty object in the form: `{"<port>/<tcp|udp|sctp>": {}}` type: "object" additionalProperties: type: "object" enum: - {} default: {} Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. type: "boolean" default: false OpenStdin: description: "Open `stdin`" type: "boolean" default: false StdinOnce: description: "Close `stdin` after one attached client disconnects" type: "boolean" default: false Env: description: | A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. type: "array" items: type: "string" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" Image: description: | The name of the image to use when creating the container/ type: "string" Volumes: description: | An object mapping mount point paths inside the container to empty objects. type: "object" additionalProperties: type: "object" enum: - {} default: {} WorkingDir: description: "The working directory for commands to run in." type: "string" Entrypoint: description: | The entry point for the container as a string or an array of strings. If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). type: "array" items: type: "string" NetworkDisabled: description: "Disable networking for the container." type: "boolean" MacAddress: description: "MAC address of the container." type: "string" OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" items: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" default: "SIGTERM" StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" items: type: "string" NetworkingConfig: description: | NetworkingConfig represents the container's networking configuration for each of its interfaces. It is used for the networking configs specified in the `docker create` and `docker network connect` commands. type: "object" properties: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from # /definitions/EndpointSettings, because containers/create currently # does not support attaching to multiple networks, so the example request # would be confusing if it showed that multiple networks can be contained # in the EndpointsConfig. 
# TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: description: Name of the network's bridge (for example, `docker0`). type: "string" example: "docker0" SandboxID: description: SandboxID uniquely represents a container's network stack. type: "string" example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. type: "boolean" example: false LinkLocalIPv6Address: description: IPv6 unicast address using the link-local prefix. type: "string" example: "fe80::42:acff:fe11:1" LinkLocalIPv6PrefixLen: description: Prefix length of the IPv6 unicast address. type: "integer" example: "64" Ports: $ref: "#/definitions/PortMap" SandboxKey: description: SandboxKey identifies the sandbox type: "string" example: "/var/run/docker/netns/8ab54b426c38" # TODO is SecondaryIPAddresses actually used? SecondaryIPAddresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO is SecondaryIPv6Addresses actually used? SecondaryIPv6Addresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO properties below are part of DefaultNetworkSettings, which is # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 EndpointID: description: | EndpointID uniquely represents a service endpoint in a Sandbox. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.1" GlobalIPv6Address: description: | Global IPv6 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. 
This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 64 IPAddress: description: | IPv4 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address for this network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8:2::100" MacAddress: description: | MAC address for the container on the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "02:42:ac:11:00:04" Networks: description: | Information about all networks that the container is connected to. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Address: description: Address represents an IPv4 or IPv6 IP address. type: "object" properties: Addr: description: IP address. type: "string" PrefixLen: description: Mask length of the IP address. type: "integer" PortMap: description: | PortMap describes the mapping of container ports to host ports, using the container's port-number and protocol as key in the format `<port>/<protocol>`, for example, `80/udp`. If a container's port is mapped for multiple protocols, separate entries are added to the mapping table. type: "object" additionalProperties: type: "array" x-nullable: true items: $ref: "#/definitions/PortBinding" example: "443/tcp": - HostIp: "127.0.0.1" HostPort: "4443" "80/tcp": - HostIp: "0.0.0.0" HostPort: "80" - HostIp: "0.0.0.0" HostPort: "8080" "80/udp": - HostIp: "0.0.0.0" HostPort: "80" "53/udp": - HostIp: "0.0.0.0" HostPort: "53" "2377/tcp": null PortBinding: description: | PortBinding represents a binding between a host IP address and a host port. type: "object" properties: HostIp: description: "Host IP address that the container's port is mapped to." type: "string" example: "127.0.0.1" HostPort: description: "Host port number that the container's port is mapped to." type: "string" example: "4443" GraphDriverData: description: "Information about a container's graph driver." 
type: "object" required: [Name, Data] properties: Name: type: "string" x-nullable: false Data: type: "object" x-nullable: false additionalProperties: type: "string" Image: type: "object" required: - Id - Parent - Comment - Created - Container - DockerVersion - Author - Architecture - Os - Size - VirtualSize - GraphDriver - RootFS properties: Id: type: "string" x-nullable: false RepoTags: type: "array" items: type: "string" RepoDigests: type: "array" items: type: "string" Parent: type: "string" x-nullable: false Comment: type: "string" x-nullable: false Created: type: "string" x-nullable: false Container: type: "string" x-nullable: false ContainerConfig: $ref: "#/definitions/ContainerConfig" DockerVersion: type: "string" x-nullable: false Author: type: "string" x-nullable: false Config: $ref: "#/definitions/ContainerConfig" Architecture: type: "string" x-nullable: false Os: type: "string" x-nullable: false OsVersion: type: "string" Size: type: "integer" format: "int64" x-nullable: false VirtualSize: type: "integer" format: "int64" x-nullable: false GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: type: "object" required: [Type] properties: Type: type: "string" x-nullable: false Layers: type: "array" items: type: "string" BaseLayer: type: "string" Metadata: type: "object" properties: LastTagTime: type: "string" format: "dateTime" ImageSummary: type: "object" required: - Id - ParentId - RepoTags - RepoDigests - Created - Size - SharedSize - VirtualSize - Labels - Containers properties: Id: type: "string" x-nullable: false ParentId: type: "string" x-nullable: false RepoTags: type: "array" x-nullable: false items: type: "string" RepoDigests: type: "array" x-nullable: false items: type: "string" Created: type: "integer" x-nullable: false Size: type: "integer" x-nullable: false SharedSize: type: "integer" x-nullable: false VirtualSize: type: "integer" x-nullable: false Labels: type: "object" x-nullable: false additionalProperties: type: "string" Containers: x-nullable: false type: "integer" AuthConfig: type: "object" properties: username: type: "string" password: type: "string" email: type: "string" serveraddress: type: "string" example: username: "hannibal" password: "xxxx" serveraddress: "https://index.docker.io/v1/" ProcessConfig: type: "object" properties: privileged: type: "boolean" user: type: "string" tty: type: "boolean" entrypoint: type: "string" arguments: type: "array" items: type: "string" Volume: type: "object" required: [Name, Driver, Mountpoint, Labels, Scope, Options] properties: Name: type: "string" description: "Name of the volume." x-nullable: false Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false Mountpoint: type: "string" description: "Mount path of the volume on the host." x-nullable: false CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." Status: type: "object" description: | Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature. additionalProperties: type: "object" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" Scope: type: "string" description: | The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. 
default: "local" x-nullable: false enum: ["local", "global"] Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" UsageData: type: "object" x-nullable: true required: [Size, RefCount] description: | Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. properties: Size: type: "integer" default: -1 description: | Amount of disk space used by the volume (in bytes). This information is only available for volumes created with the `"local"` volume driver. For volumes created with other volume drivers, this field is set to `-1` ("not available") x-nullable: false RefCount: type: "integer" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. x-nullable: false example: Name: "tardis" Driver: "custom" Mountpoint: "/var/lib/docker/volumes/tardis" Status: hello: "world" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" CreatedAt: "2016-06-07T20:31:11.853781916Z" Network: type: "object" properties: Name: type: "string" Id: type: "string" Created: type: "string" format: "dateTime" Scope: type: "string" Driver: type: "string" EnableIPv6: type: "boolean" IPAM: $ref: "#/definitions/IPAM" Internal: type: "boolean" Attachable: type: "boolean" Ingress: type: "boolean" Containers: type: "object" additionalProperties: $ref: "#/definitions/NetworkContainer" Options: type: "object" additionalProperties: type: "string" Labels: type: "object" additionalProperties: type: "string" example: Name: "net01" Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" Created: "2016-10-19T04:33:30.360899459Z" Scope: "local" Driver: "bridge" EnableIPv6: false IPAM: Driver: "default" Config: - Subnet: "172.19.0.0/16" Gateway: "172.19.0.1" Options: foo: "bar" Internal: false Attachable: false Ingress: false Containers: 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: Name: "test" EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: "02:42:ac:13:00:02" IPv4Address: "172.19.0.2/16" IPv6Address: "" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" IPAM: type: "object" properties: Driver: description: "Name of the IPAM driver to use." type: "string" default: "default" Config: description: | List of IPAM configuration options, specified as a map: ``` {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} ``` type: "array" items: type: "object" additionalProperties: type: "string" Options: description: "Driver-specific options, specified as a map." 
type: "object" additionalProperties: type: "string" NetworkContainer: type: "object" properties: Name: type: "string" EndpointID: type: "string" MacAddress: type: "string" IPv4Address: type: "string" IPv6Address: type: "string" BuildInfo: type: "object" properties: id: type: "string" stream: type: "string" error: type: "string" errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" aux: $ref: "#/definitions/ImageID" BuildCache: type: "object" properties: ID: type: "string" Parent: type: "string" Type: type: "string" Description: type: "string" InUse: type: "boolean" Shared: type: "boolean" Size: description: | Amount of disk space used by the build cache (in bytes). type: "integer" CreatedAt: description: | Date and time at which the build cache was created in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" LastUsedAt: description: | Date and time at which the build cache was last used in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" x-nullable: true example: "2017-08-09T07:09:37.632105588Z" UsageCount: type: "integer" ImageID: type: "object" description: "Image ID or Digest" properties: ID: type: "string" example: ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" CreateImageInfo: type: "object" properties: id: type: "string" error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" PushImageInfo: type: "object" properties: error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" ErrorDetail: type: "object" properties: code: type: "integer" message: type: "string" ProgressDetail: type: "object" properties: current: type: "integer" total: type: "integer" ErrorResponse: description: "Represents an error." type: "object" required: ["message"] properties: message: description: "The error message." type: "string" x-nullable: false example: message: "Something went wrong." IdResponse: description: "Response to an API call that returns just an Id" type: "object" required: ["Id"] properties: Id: description: "The id of the newly created object." type: "string" x-nullable: false EndpointSettings: description: "Configuration for a network endpoint." type: "object" properties: # Configurations IPAMConfig: $ref: "#/definitions/EndpointIPAMConfig" Links: type: "array" items: type: "string" example: - "container_1" - "container_2" Aliases: type: "array" items: type: "string" example: - "server_x" - "server_y" # Operational data NetworkID: description: | Unique ID of the network. type: "string" example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" EndpointID: description: | Unique ID for the service endpoint in a Sandbox. type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for this network. type: "string" example: "172.17.0.1" IPAddress: description: | IPv4 address. type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address. type: "string" example: "2001:db8:2::100" GlobalIPv6Address: description: | Global IPv6 address. 
type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. type: "integer" format: "int64" example: 64 MacAddress: description: | MAC address for the endpoint on this network. type: "string" example: "02:42:ac:11:00:04" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" x-nullable: true additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" EndpointIPAMConfig: description: | EndpointIPAMConfig represents an endpoint's IPAM configuration. type: "object" x-nullable: true properties: IPv4Address: type: "string" example: "172.20.30.33" IPv6Address: type: "string" example: "2001:db8:abcd::3033" LinkLocalIPs: type: "array" items: type: "string" example: - "169.254.34.68" - "fe80::3468" PluginMount: type: "object" x-nullable: false required: [Name, Description, Settable, Source, Destination, Type, Options] properties: Name: type: "string" x-nullable: false example: "some-mount" Description: type: "string" x-nullable: false example: "This is a mount that's used by the plugin." Settable: type: "array" items: type: "string" Source: type: "string" example: "/var/lib/docker/plugins/" Destination: type: "string" x-nullable: false example: "/mnt/state" Type: type: "string" x-nullable: false example: "bind" Options: type: "array" items: type: "string" example: - "rbind" - "rw" PluginDevice: type: "object" required: [Name, Description, Settable, Path] x-nullable: false properties: Name: type: "string" x-nullable: false Description: type: "string" x-nullable: false Settable: type: "array" items: type: "string" Path: type: "string" example: "/dev/fuse" PluginEnv: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" Description: x-nullable: false type: "string" Settable: type: "array" items: type: "string" Value: type: "string" PluginInterfaceType: type: "object" x-nullable: false required: [Prefix, Capability, Version] properties: Prefix: type: "string" x-nullable: false Capability: type: "string" x-nullable: false Version: type: "string" x-nullable: false PluginPrivilegeItem: description: | Describes a permission the user has to accept upon installing the plugin. type: "object" properties: Name: type: "string" example: "network" Description: type: "string" Value: type: "array" items: type: "string" example: - "host" Plugin: description: "A plugin for the Engine API" type: "object" required: [Settings, Enabled, Config, Name] properties: Id: type: "string" example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" Name: type: "string" x-nullable: false example: "tiborvass/sample-volume-plugin" Enabled: description: True if the plugin is running. False if the plugin is not running, only installed. type: "boolean" x-nullable: false example: true Settings: description: "Settings that can be modified by users." 
type: "object" x-nullable: false required: [Args, Devices, Env, Mounts] properties: Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: type: "string" example: - "DEBUG=0" Args: type: "array" items: type: "string" Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PluginReference: description: "plugin remote reference used to push/pull the plugin" type: "string" x-nullable: false example: "localhost:5000/tiborvass/sample-volume-plugin:latest" Config: description: "The config of a plugin." type: "object" x-nullable: false required: - Description - Documentation - Interface - Entrypoint - WorkDir - Network - Linux - PidHost - PropagatedMount - IpcHost - Mounts - Env - Args properties: DockerVersion: description: "Docker Version used to create the plugin" type: "string" x-nullable: false example: "17.06.0-ce" Description: type: "string" x-nullable: false example: "A sample volume plugin for Docker" Documentation: type: "string" x-nullable: false example: "https://docs.docker.com/engine/extend/plugins/" Interface: description: "The interface between Docker and the plugin" x-nullable: false type: "object" required: [Types, Socket] properties: Types: type: "array" items: $ref: "#/definitions/PluginInterfaceType" example: - "docker.volumedriver/1.0" Socket: type: "string" x-nullable: false example: "plugins.sock" ProtocolScheme: type: "string" example: "some.protocol/v1.0" description: "Protocol to use for clients connecting to the plugin." enum: - "" - "moby.plugins.http/v1" Entrypoint: type: "array" items: type: "string" example: - "/usr/bin/sample-volume-plugin" - "/data" WorkDir: type: "string" x-nullable: false example: "/bin/" User: type: "object" x-nullable: false properties: UID: type: "integer" format: "uint32" example: 1000 GID: type: "integer" format: "uint32" example: 1000 Network: type: "object" x-nullable: false required: [Type] properties: Type: x-nullable: false type: "string" example: "host" Linux: type: "object" x-nullable: false required: [Capabilities, AllowAllDevices, Devices] properties: Capabilities: type: "array" items: type: "string" example: - "CAP_SYS_ADMIN" - "CAP_SYSLOG" AllowAllDevices: type: "boolean" x-nullable: false example: false Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PropagatedMount: type: "string" x-nullable: false example: "/mnt/volumes" IpcHost: type: "boolean" x-nullable: false example: false PidHost: type: "boolean" x-nullable: false example: false Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: $ref: "#/definitions/PluginEnv" example: - Name: "DEBUG" Description: "If set, prints debug messages" Settable: null Value: "0" Args: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" example: "args" Description: x-nullable: false type: "string" example: "command line arguments" Settable: type: "array" items: type: "string" Value: type: "array" items: type: "string" rootfs: type: "object" properties: type: type: "string" example: "layers" diff_ids: type: "array" items: type: "string" example: - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ObjectVersion: description: | The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. 
The client must send the version number along with the modified specification when updating these objects. This approach ensures safe concurrency and determinism in that the change on the object may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not unintentionally overwrite each other. type: "object" properties: Index: type: "integer" format: "uint64" example: 373531 NodeSpec: type: "object" properties: Name: description: "Name for the node." type: "string" example: "my-node" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Role: description: "Role of the node." type: "string" enum: - "worker" - "manager" example: "manager" Availability: description: "Availability of the node." type: "string" enum: - "active" - "pause" - "drain" example: "active" example: Availability: "active" Name: "node-name" Role: "manager" Labels: foo: "bar" Node: type: "object" properties: ID: type: "string" example: "24ifsmvkjbyhk" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the node was added to the swarm in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the node was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/NodeSpec" Description: $ref: "#/definitions/NodeDescription" Status: $ref: "#/definitions/NodeStatus" ManagerStatus: $ref: "#/definitions/ManagerStatus" NodeDescription: description: | NodeDescription encapsulates the properties of the Node as reported by the agent. type: "object" properties: Hostname: type: "string" example: "bf3067039e47" Platform: $ref: "#/definitions/Platform" Resources: $ref: "#/definitions/ResourceObject" Engine: $ref: "#/definitions/EngineDescription" TLSInfo: $ref: "#/definitions/TLSInfo" Platform: description: | Platform represents the platform (Arch/OS). type: "object" properties: Architecture: description: | Architecture represents the hardware architecture (for example, `x86_64`). type: "string" example: "x86_64" OS: description: | OS represents the Operating System (for example, `linux` or `windows`). type: "string" example: "linux" EngineDescription: description: "EngineDescription provides information about an engine." 
type: "object" properties: EngineVersion: type: "string" example: "17.06.0" Labels: type: "object" additionalProperties: type: "string" example: foo: "bar" Plugins: type: "array" items: type: "object" properties: Type: type: "string" Name: type: "string" example: - Type: "Log" Name: "awslogs" - Type: "Log" Name: "fluentd" - Type: "Log" Name: "gcplogs" - Type: "Log" Name: "gelf" - Type: "Log" Name: "journald" - Type: "Log" Name: "json-file" - Type: "Log" Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" Name: "syslog" - Type: "Network" Name: "bridge" - Type: "Network" Name: "host" - Type: "Network" Name: "ipvlan" - Type: "Network" Name: "macvlan" - Type: "Network" Name: "null" - Type: "Network" Name: "overlay" - Type: "Volume" Name: "local" - Type: "Volume" Name: "localhost:5000/vieux/sshfs:latest" - Type: "Volume" Name: "vieux/sshfs:latest" TLSInfo: description: | Information about the issuer of leaf TLS certificates and the trusted root CA certificate. type: "object" properties: TrustRoot: description: | The root CA certificate(s) that are used to validate leaf TLS certificates. type: "string" CertIssuerSubject: description: The base64-url-safe-encoded raw subject bytes of the issuer. type: "string" CertIssuerPublicKey: description: | The base64-url-safe-encoded raw public key bytes of the issuer. type: "string" example: TrustRoot: | -----BEGIN CERTIFICATE----- MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H -----END CERTIFICATE----- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" NodeStatus: description: | NodeStatus represents the status of a node. It provides the current status of the node, as seen by the manager. type: "object" properties: State: $ref: "#/definitions/NodeState" Message: type: "string" example: "" Addr: description: "IP address of the node." type: "string" example: "172.17.0.2" NodeState: description: "NodeState represents the state of a node." type: "string" enum: - "unknown" - "down" - "ready" - "disconnected" example: "ready" ManagerStatus: description: | ManagerStatus represents the status of a manager. It provides the current status of a node's manager component, if the node is a manager. x-nullable: true type: "object" properties: Leader: type: "boolean" default: false example: true Reachability: $ref: "#/definitions/Reachability" Addr: description: | The IP address and port at which the manager is reachable. type: "string" example: "10.0.0.46:2377" Reachability: description: "Reachability represents the reachability of a node." type: "string" enum: - "unknown" - "unreachable" - "reachable" example: "reachable" SwarmSpec: description: "User modifiable swarm configuration." type: "object" properties: Name: description: "Name of the swarm." type: "string" example: "default" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: com.example.corp.type: "production" com.example.corp.department: "engineering" Orchestration: description: "Orchestration configuration." type: "object" x-nullable: true properties: TaskHistoryRetentionLimit: description: | The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks. type: "integer" format: "int64" example: 10 Raft: description: "Raft configuration." type: "object" properties: SnapshotInterval: description: "The number of log entries between snapshots." type: "integer" format: "uint64" example: 10000 KeepOldSnapshots: description: | The number of snapshots to keep beyond the current snapshot. type: "integer" format: "uint64" LogEntriesForSlowFollowers: description: | The number of log entries to keep around to sync up slow followers after a snapshot is created. type: "integer" format: "uint64" example: 500 ElectionTick: description: | The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 3 HeartbeatTick: description: | The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 1 Dispatcher: description: "Dispatcher configuration." type: "object" x-nullable: true properties: HeartbeatPeriod: description: | The delay for an agent to send a heartbeat to the dispatcher. type: "integer" format: "int64" example: 5000000000 CAConfig: description: "CA configuration." type: "object" x-nullable: true properties: NodeCertExpiry: description: "The duration node certificates are issued for." type: "integer" format: "int64" example: 7776000000000000 ExternalCAs: description: | Configuration for forwarding signing requests to an external certificate authority. type: "array" items: type: "object" properties: Protocol: description: | Protocol for communication with the external CA (currently only `cfssl` is supported). type: "string" enum: - "cfssl" default: "cfssl" URL: description: | URL where certificate signing requests should be sent. type: "string" Options: description: | An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver. type: "object" additionalProperties: type: "string" CACert: description: | The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided). type: "string" SigningCACert: description: | The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format. type: "string" SigningCAKey: description: | The desired signing CA key for all swarm node TLS leaf certificates, in PEM format. type: "string" ForceRotate: description: | An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey` format: "uint64" type: "integer" EncryptionConfig: description: "Parameters related to encryption-at-rest." type: "object" properties: AutoLockManagers: description: | If set, generate a key and use it to lock data stored on the managers. 
type: "boolean" example: false TaskDefaults: description: "Defaults for creating tasks in this cluster." type: "object" properties: LogDriver: description: | The log driver to use for tasks created in the orchestrator if unspecified by a service. Updating this value only affects new tasks. Existing tasks continue to use their previously configured log driver until recreated. type: "object" properties: Name: description: | The log driver to use as a default for new tasks. type: "string" example: "json-file" Options: description: | Driver-specific options for the selectd log driver, specified as key/value pairs. type: "object" additionalProperties: type: "string" example: "max-file": "10" "max-size": "100m" # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but # without `JoinTokens`. ClusterInfo: description: | ClusterInfo represents information about the swarm as is returned by the "/info" endpoint. Join-tokens are not included. x-nullable: true type: "object" properties: ID: description: "The ID of the swarm." type: "string" example: "abajmipo7b4xz5ip2nrla6b11" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the swarm was initialised in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the swarm was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/SwarmSpec" TLSInfo: $ref: "#/definitions/TLSInfo" RootRotationInProgress: description: | Whether there is currently a root CA rotation in progress for the swarm type: "boolean" example: false DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. If no port is set or is set to 0, the default port (4789) is used. type: "integer" format: "uint32" default: 4789 example: 4789 DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" format: "CIDR" example: ["10.10.0.0/16", "20.20.0.0/16"] SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. type: "integer" format: "uint32" maximum: 29 default: 24 example: 24 JoinTokens: description: | JoinTokens contains the tokens workers and managers need to join the swarm. type: "object" properties: Worker: description: | The token workers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" Manager: description: | The token managers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" Swarm: type: "object" allOf: - $ref: "#/definitions/ClusterInfo" - type: "object" properties: JoinTokens: $ref: "#/definitions/JoinTokens" TaskSpec: description: "User modifiable task configuration." type: "object" properties: PluginSpec: type: "object" description: | Plugin spec for the service. *(Experimental release only.)* <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. 
properties: Name: description: "The name or 'alias' to use for the plugin." type: "string" Remote: description: "The plugin image reference to use." type: "string" Disabled: description: "Disable the plugin once scheduled." type: "boolean" PluginPrivilege: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" ContainerSpec: type: "object" description: | Container spec for the service. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. properties: Image: description: "The image name to use for the container" type: "string" Labels: description: "User-defined key/value data." type: "object" additionalProperties: type: "string" Command: description: "The command to be run in the image." type: "array" items: type: "string" Args: description: "Arguments to the command." type: "array" items: type: "string" Hostname: description: | The hostname to use for the container, as a valid [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. type: "string" Env: description: | A list of environment variables in the form `VAR=value`. type: "array" items: type: "string" Dir: description: "The working directory for commands to run in." type: "string" User: description: "The user inside the container." type: "string" Groups: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" Privileges: type: "object" description: "Security options for the container" properties: CredentialSpec: type: "object" description: "CredentialSpec for managed service account (Windows only)" properties: Config: type: "string" example: "0bt9dmxjvjiqermk6xrop3ekq" description: | Load credential spec from a Swarm Config with the given ID. The specified config must also be present in the Configs field with the Runtime property set. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. File: type: "string" example: "spec.json" description: | Load credential spec from this file. The file is read by the daemon, and must be present in the `CredentialSpecs` subdirectory in the docker data directory, which defaults to `C:\ProgramData\Docker\` on Windows. For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. Registry: type: "string" description: | Load credential spec from this value in the Windows registry. The specified registry value must be located in: `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" properties: Disable: type: "boolean" description: "Disable SELinux" User: type: "string" description: "SELinux user label" Role: type: "string" description: "SELinux role label" Type: type: "string" description: "SELinux type label" Level: type: "string" description: "SELinux level label" TTY: description: "Whether a pseudo-TTY should be allocated." 
type: "boolean" OpenStdin: description: "Open `stdin`" type: "boolean" ReadOnly: description: "Mount the container's root filesystem as read only." type: "boolean" Mounts: description: | Specification for mounts to be added to containers created as part of the service. type: "array" items: $ref: "#/definitions/Mount" StopSignal: description: "Signal to stop the container." type: "string" StopGracePeriod: description: | Amount of time to wait for the container to terminate before forcefully killing it. type: "integer" format: "int64" HealthCheck: $ref: "#/definitions/HealthConfig" Hosts: type: "array" description: | A list of hostname/IP mappings to add to the container's `hosts` file. The format of extra hosts is specified in the [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) man page: IP_address canonical_hostname [aliases...] items: type: "string" DNSConfig: description: | Specification for DNS related configurations in resolver configuration file (`resolv.conf`). type: "object" properties: Nameservers: description: "The IP addresses of the name servers." type: "array" items: type: "string" Search: description: "A search list for host-name lookup." type: "array" items: type: "string" Options: description: | A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.). type: "array" items: type: "string" Secrets: description: | Secrets contains references to zero or more secrets that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" SecretID: description: | SecretID represents the ID of the specific secret that we're referencing. type: "string" SecretName: description: | SecretName is the name of the secret that this references, but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID. type: "string" Configs: description: | Configs contains references to zero or more configs that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" Runtime: description: | Runtime represents a target that is not mounted into the container but is used by the task <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually > exclusive type: "object" ConfigID: description: | ConfigID represents the ID of the specific config that we're referencing. type: "string" ConfigName: description: | ConfigName is the name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID. 
type: "string" Isolation: type: "string" description: | Isolation technology of the containers running the service. (Windows only) enum: - "default" - "process" - "hyperv" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true Sysctls: description: | Set kernel namedspaced parameters (sysctls) in the container. The Sysctls option on services accepts the same sysctls as the are supported on containers. Note that while the same sysctls are supported, no guarantees or checks are made about their suitability for a clustered environment, and it's up to the user to determine whether a given sysctl will work properly in a Service. type: "object" additionalProperties: type: "string" # This option is not used by Windows containers CapabilityAdd: type: "array" description: | A list of kernel capabilities to add to the default set for the container. items: type: "string" example: - "CAP_NET_RAW" - "CAP_SYS_ADMIN" - "CAP_SYS_CHROOT" - "CAP_SYSLOG" CapabilityDrop: type: "array" description: | A list of kernel capabilities to drop from the default set for the container. items: type: "string" example: - "CAP_NET_RAW" Ulimits: description: | A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay networks. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. type: "object" properties: ContainerID: description: "ID of the container represented by this task" type: "string" Resources: description: | Resource requirements which apply to each individual container created as part of the service. type: "object" properties: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" Reservation: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: description: | Specification for the restart policy which applies to containers created as part of this service. type: "object" properties: Condition: description: "Condition for restart." type: "string" enum: - "none" - "on-failure" - "any" Delay: description: "Delay between restart attempts." type: "integer" format: "int64" MaxAttempts: description: | Maximum attempts to restart a given container before giving up (default value is 0, which is ignored). type: "integer" format: "int64" default: 0 Window: description: | Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded). type: "integer" format: "int64" default: 0 Placement: type: "object" properties: Constraints: description: | An array of constraint expressions to limit the set of nodes where a task can be scheduled. Constraint expressions can either use a _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find nodes that satisfy every expression (AND match). 
Constraints can match node or Docker Engine labels as follows: node attribute | matches | example ---------------------|--------------------------------|----------------------------------------------- `node.id` | Node ID | `node.id==2ivku8v2gvtg4` `node.hostname` | Node hostname | `node.hostname!=node-2` `node.role` | Node role (`manager`/`worker`) | `node.role==manager` `node.platform.os` | Node operating system | `node.platform.os==windows` `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` `node.labels` | User-defined node labels | `node.labels.security==high` `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` `engine.labels` apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add `node.labels` for operational purposes by using the [`node update endpoint`](#operation/NodeUpdate). type: "array" items: type: "string" example: - "node.hostname!=node3.corp.example.com" - "node.role!=manager" - "node.labels.type==production" - "node.platform.os==linux" - "node.platform.arch==x86_64" Preferences: description: | Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence. type: "array" items: type: "object" properties: Spread: type: "object" properties: SpreadDescriptor: description: | label descriptor, such as `engine.labels.az`. type: "string" example: - Spread: SpreadDescriptor: "node.labels.datacenter" - Spread: SpreadDescriptor: "node.labels.rack" MaxReplicas: description: | Maximum number of replicas for per node (default value is 0, which is unlimited) type: "integer" format: "int64" default: 0 Platforms: description: | Platforms stores all the platforms that the service's image can run on. This field is used in the platform filter for scheduling. If empty, then the platform filter is off, meaning there are no scheduling restrictions. type: "array" items: $ref: "#/definitions/Platform" ForceUpdate: description: | A counter that triggers an update even if no relevant parameters have been changed. type: "integer" Runtime: description: | Runtime is the type of runtime specified for the task executor. type: "string" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" LogDriver: description: | Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified. type: "object" properties: Name: type: "string" Options: type: "object" additionalProperties: type: "string" TaskState: type: "string" enum: - "new" - "allocated" - "pending" - "assigned" - "accepted" - "preparing" - "ready" - "starting" - "running" - "complete" - "shutdown" - "failed" - "rejected" - "remove" - "orphaned" Task: type: "object" properties: ID: description: "The ID of the task." type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Name: description: "Name of the task." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Spec: $ref: "#/definitions/TaskSpec" ServiceID: description: "The ID of the service this task is part of." type: "string" Slot: type: "integer" NodeID: description: "The ID of the node that this task is on." 
type: "string" AssignedGenericResources: $ref: "#/definitions/GenericResources" Status: type: "object" properties: Timestamp: type: "string" format: "dateTime" State: $ref: "#/definitions/TaskState" Message: type: "string" Err: type: "string" ContainerStatus: type: "object" properties: ContainerID: type: "string" PID: type: "integer" ExitCode: type: "integer" DesiredState: $ref: "#/definitions/TaskState" JobIteration: description: | If the Service this Task belongs to is a job-mode service, contains the JobIteration of the Service this Task was created for. Absent if the Task was created for a Replicated or Global Service. $ref: "#/definitions/ObjectVersion" example: ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" AssignedGenericResources: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" ServiceSpec: description: "User modifiable configuration for a service." properties: Name: description: "Name of the service." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" TaskTemplate: $ref: "#/definitions/TaskSpec" Mode: description: "Scheduling mode for the service." type: "object" properties: Replicated: type: "object" properties: Replicas: type: "integer" format: "int64" Global: type: "object" ReplicatedJob: description: | The mode used for services with a finite number of tasks that run to a completed state. type: "object" properties: MaxConcurrent: description: | The maximum number of replicas to run simultaneously. type: "integer" format: "int64" default: 1 TotalCompletions: description: | The total number of replicas desired to reach the Completed state. If unset, will default to the value of `MaxConcurrent` type: "integer" format: "int64" GlobalJob: description: | The mode used for services which run a task to the completed state on each valid node. type: "object" UpdateConfig: description: "Specification for the update strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: "Amount of time between updates, in nanoseconds." type: "integer" format: "int64" FailureAction: description: | Action to take if an updated task fails to run, or stops running during the update. 
type: "string" enum: - "continue" - "pause" - "rollback" Monitor: description: | Amount of time to monitor each updated task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" RollbackConfig: description: "Specification for the rollback strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: | Amount of time between rollback iterations, in nanoseconds. type: "integer" format: "int64" FailureAction: description: | Action to take if an rolled back task fails to run, or stops running during the rollback. type: "string" enum: - "continue" - "pause" Monitor: description: | Amount of time to monitor each rolled back task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" EndpointSpec: $ref: "#/definitions/EndpointSpec" EndpointPortConfig: type: "object" properties: Name: type: "string" Protocol: type: "string" enum: - "tcp" - "udp" - "sctp" TargetPort: description: "The port inside the container." type: "integer" PublishedPort: description: "The port on the swarm hosts." type: "integer" PublishMode: description: | The mode in which port is published. <p><br /></p> - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publish the port directly on the swarm node where that service is running. type: "string" enum: - "ingress" - "host" default: "ingress" example: "ingress" EndpointSpec: description: "Properties that can be configured to access and load balance a service." type: "object" properties: Mode: description: | The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" - "dnsrr" default: "vip" Ports: description: | List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used. 
type: "array" items: $ref: "#/definitions/EndpointPortConfig" Service: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ServiceSpec" Endpoint: type: "object" properties: Spec: $ref: "#/definitions/EndpointSpec" Ports: type: "array" items: $ref: "#/definitions/EndpointPortConfig" VirtualIPs: type: "array" items: type: "object" properties: NetworkID: type: "string" Addr: type: "string" UpdateStatus: description: "The status of a service update." type: "object" properties: State: type: "string" enum: - "updating" - "paused" - "completed" StartedAt: type: "string" format: "dateTime" CompletedAt: type: "string" format: "dateTime" Message: type: "string" ServiceStatus: description: | The status of the service's tasks. Provided only when requested as part of a ServiceList operation. type: "object" properties: RunningTasks: description: | The number of tasks for the service currently in the Running state. type: "integer" format: "uint64" example: 7 DesiredTasks: description: | The number of tasks for the service desired to be running. For replicated services, this is the replica count from the service spec. For global services, this is computed by taking count of all tasks for the service with a Desired State other than Shutdown. type: "integer" format: "uint64" example: 10 CompletedTasks: description: | The number of tasks for a job that are in the Completed state. This field must be cross-referenced with the service type, as the value of 0 may mean the service is not in a job mode, or it may mean the job-mode service has no tasks yet Completed. type: "integer" format: "uint64" JobStatus: description: | The status of the service when it is in one of ReplicatedJob or GlobalJob modes. Absent on Replicated and Global mode services. The JobIteration is an ObjectVersion, but unlike the Service's version, does not need to be sent with an update request. type: "object" properties: JobIteration: description: | JobIteration is a value increased each time a Job is executed, successfully or otherwise. "Executed", in this case, means the job as a whole has been started, not that an individual Task has been launched. A job is "Executed" when its ServiceSpec is updated. JobIteration can be used to disambiguate Tasks belonging to different executions of a job. Though JobIteration will increase with each subsequent execution, it may not necessarily increase by 1, and so JobIteration should not be used to $ref: "#/definitions/ObjectVersion" LastExecution: description: | The last time, as observed by the server, that this job was started. 
type: "string" format: "dateTime" example: ID: "9mnpnzenvg8p8tdbtq4wvbkcz" Version: Index: 19 CreatedAt: "2016-06-07T21:05:51.880065305Z" UpdatedAt: "2016-06-07T21:07:29.962229872Z" Spec: Name: "hopeful_cori" TaskTemplate: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Endpoint: Spec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 VirtualIPs: - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.2/16" - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.3/16" ImageDeleteResponseItem: type: "object" properties: Untagged: description: "The image ID of an image that was untagged" type: "string" Deleted: description: "The image ID of an image that was deleted" type: "string" ServiceUpdateResponse: type: "object" properties: Warnings: description: "Optional warning messages" type: "array" items: type: "string" example: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: type: "object" properties: Id: description: "The ID of this container" type: "string" x-go-name: "ID" Names: description: "The names that this container has been given" type: "array" items: type: "string" Image: description: "The name of the image used when creating this container" type: "string" ImageID: description: "The ID of the image that this container was created from" type: "string" Command: description: "Command to run when starting the container" type: "string" Created: description: "When the container was created" type: "integer" format: "int64" Ports: description: "The ports exposed by this container" type: "array" items: $ref: "#/definitions/Port" SizeRw: description: "The size of files that have been created or changed by this container" type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container" type: "integer" format: "int64" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" State: description: "The state of this container (e.g. `Exited`)" type: "string" Status: description: "Additional human-readable status of this container (e.g. `Exit 0`)" type: "string" HostConfig: type: "object" properties: NetworkMode: type: "string" NetworkSettings: description: "A summary of the container's network settings" type: "object" properties: Networks: type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Mounts: type: "array" items: $ref: "#/definitions/Mount" Driver: description: "Driver represents a driver (network, logging, secrets)." type: "object" required: [Name] properties: Name: description: "Name of the driver." type: "string" x-nullable: false example: "some-driver" Options: description: "Key/value map of driver-specific options." type: "object" x-nullable: false additionalProperties: type: "string" example: OptionA: "value for driver-specific option A" OptionB: "value for driver-specific option B" SecretSpec: type: "object" properties: Name: description: "User-defined name of the secret." 
type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) data to store as secret. This field is only used to _create_ a secret, and is not returned by other endpoints. type: "string" example: "" Driver: description: | Name of the secrets driver used to fetch the secret's value from an external secret store. $ref: "#/definitions/Driver" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Secret: type: "object" properties: ID: type: "string" example: "blt1owaxmitz71s9v5zh81zun" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" UpdatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" Spec: $ref: "#/definitions/SecretSpec" ConfigSpec: type: "object" properties: Name: description: "User-defined name of the config." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) config data. type: "string" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Config: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ConfigSpec" ContainerState: description: | ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" properties: Status: description: | String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead". type: "string" enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] example: "running" Running: description: | Whether this container is running. Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. Use the `Status` field instead to determine if a container's state is "running". type: "boolean" example: true Paused: description: "Whether this container is paused." type: "boolean" example: false Restarting: description: "Whether this container is restarting." type: "boolean" example: false OOMKilled: description: | Whether this container has been killed because it ran out of memory. type: "boolean" example: false Dead: type: "boolean" example: false Pid: description: "The process ID of this container" type: "integer" example: 1234 ExitCode: description: "The last exit code of this container" type: "integer" example: 0 Error: type: "string" StartedAt: description: "The time when this container was last started." 
type: "string" example: "2020-01-06T09:06:59.461876391Z" FinishedAt: description: "The time when this container last exited." type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: x-nullable: true $ref: "#/definitions/Health" SystemVersion: type: "object" description: | Response of Engine API: GET "/version" properties: Platform: type: "object" required: [Name] properties: Name: type: "string" Components: type: "array" description: | Information about system components items: type: "object" x-go-name: ComponentVersion required: [Name, Version] properties: Name: description: | Name of the component type: "string" example: "Engine" Version: description: | Version of the component type: "string" x-nullable: false example: "19.03.12" Details: description: | Key/value pairs of strings with additional information about the component. These values are intended for informational purposes only, and their content is not defined, and not part of the API specification. These messages can be printed by the client as information to the user. type: "object" x-nullable: true Version: description: "The version of the daemon" type: "string" example: "19.03.12" ApiVersion: description: | The default (and highest) API version that is supported by the daemon type: "string" example: "1.40" MinAPIVersion: description: | The minimum API version that is supported by the daemon type: "string" example: "1.12" GitCommit: description: | The Git commit of the source code that was used to build the daemon type: "string" example: "48a66213fe" GoVersion: description: | The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" example: "go1.13.14" Os: description: | The operating system that the daemon is running on ("linux" or "windows") type: "string" example: "linux" Arch: description: | The architecture that the daemon is running on type: "string" example: "amd64" KernelVersion: description: | The kernel version (`uname -r`) that the daemon is running on. This field is omitted when empty. type: "string" example: "4.19.76-linuxkit" Experimental: description: | Indicates if the daemon is started with experimental features enabled. This field is omitted when empty / false. type: "boolean" example: true BuildTime: description: | The date and time that the daemon was compiled. type: "string" example: "2020-06-22T15:49:27.000000000+00:00" SystemInfo: type: "object" properties: ID: description: | Unique identifier of the daemon. <p><br /></p> > **Note**: The format of the ID itself is not part of the API, and > should not be considered stable. type: "string" example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" Containers: description: "Total number of containers on the host." type: "integer" example: 14 ContainersRunning: description: | Number of containers with status `"running"`. type: "integer" example: 3 ContainersPaused: description: | Number of containers with status `"paused"`. type: "integer" example: 1 ContainersStopped: description: | Number of containers with status `"stopped"`. type: "integer" example: 10 Images: description: | Total number of images on the host. Both _tagged_ and _untagged_ (dangling) images are counted. type: "integer" example: 508 Driver: description: "Name of the storage driver in use." type: "string" example: "overlay2" DriverStatus: description: | Information specific to the storage driver, provided as "label" / "value" pairs. 
This information is provided by the storage driver, and formatted in a way consistent with the output of `docker info` on the command line. <p><br /></p> > **Note**: The information returned in this field, including the > formatting of values and labels, should not be considered stable, > and may change without notice. type: "array" items: type: "array" items: type: "string" example: - ["Backing Filesystem", "extfs"] - ["Supports d_type", "true"] - ["Native Overlay Diff", "true"] DockerRootDir: description: | Root directory of persistent Docker state. Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` on Windows. type: "string" example: "/var/lib/docker" Plugins: $ref: "#/definitions/PluginsInfo" MemoryLimit: description: "Indicates if the host has memory limit support enabled." type: "boolean" example: true SwapLimit: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true KernelMemory: description: | Indicates if the host has kernel memory limit support enabled. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "boolean" example: true CpuCfsPeriod: description: | Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host. type: "boolean" example: true CpuCfsQuota: description: | Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host. type: "boolean" example: true CPUShares: description: | Indicates if CPU Shares limiting is supported by the host. type: "boolean" example: true CPUSet: description: | Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) type: "boolean" example: true PidsLimit: description: "Indicates if the host kernel has PID limit support enabled." type: "boolean" example: true OomKillDisable: description: "Indicates if OOM killer disable is supported on the host." type: "boolean" IPv4Forwarding: description: "Indicates IPv4 forwarding is enabled." type: "boolean" example: true BridgeNfIptables: description: "Indicates if `bridge-nf-call-iptables` is available on the host." type: "boolean" example: true BridgeNfIp6tables: description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." type: "boolean" example: true Debug: description: | Indicates if the daemon is running in debug-mode / with debug-level logging enabled. type: "boolean" example: true NFd: description: | The total number of file Descriptors in use by the daemon process. This information is only returned if debug-mode is enabled. type: "integer" example: 64 NGoroutines: description: | The number of goroutines that currently exist. This information is only returned if debug-mode is enabled. type: "integer" example: 174 SystemTime: description: | Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" example: "2017-08-08T20:28:29.06202363Z" LoggingDriver: description: | The logging driver to use as a default for new containers. type: "string" CgroupDriver: description: | The driver to use for managing cgroups. type: "string" enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" CgroupVersion: description: | The version of the cgroup. type: "string" enum: ["1", "2"] default: "1" example: "1" NEventsListener: description: "Number of event listeners subscribed." 
type: "integer" example: 30 KernelVersion: description: | Kernel version of the host. On Linux, this information obtained from `uname`. On Windows this information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. type: "string" example: "4.9.38-moby" OperatingSystem: description: | Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" or "Windows Server 2016 Datacenter" type: "string" example: "Alpine Linux v3.5" OSVersion: description: | Version of the host's operating system <p><br /></p> > **Note**: The information returned in this field, including its > very existence, and the formatting of values, should not be considered > stable, and may change without notice. type: "string" example: "16.04" OSType: description: | Generic type of the operating system of the host, as returned by the Go runtime (`GOOS`). Currently returned values are "linux" and "windows". A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "linux" Architecture: description: | Hardware architecture of the host, as returned by the Go runtime (`GOARCH`). A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "x86_64" NCPU: description: | The number of logical CPUs usable by the daemon. The number of available CPUs is checked by querying the operating system when the daemon starts. Changes to operating system CPU allocation after the daemon is started are not reflected. type: "integer" example: 4 MemTotal: description: | Total amount of physical memory available on the host, in bytes. type: "integer" format: "int64" example: 2095882240 IndexServerAddress: description: | Address / URL of the index server that is used for image search, and as a default for user authentication for Docker Hub and Docker Cloud. default: "https://index.docker.io/v1/" type: "string" example: "https://index.docker.io/v1/" RegistryConfig: $ref: "#/definitions/RegistryServiceConfig" GenericResources: $ref: "#/definitions/GenericResources" HttpProxy: description: | HTTP-proxy configured for the daemon. This value is obtained from the [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "http://xxxxx:[email protected]:8080" HttpsProxy: description: | HTTPS-proxy configured for the daemon. This value is obtained from the [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "https://xxxxx:[email protected]:4443" NoProxy: description: | Comma-separated list of domain extensions for which no proxy should be used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Containers do not automatically inherit this configuration. 
type: "string" example: "*.local, 169.254/16" Name: description: "Hostname of the host." type: "string" example: "node5.corp.example.com" Labels: description: | User-defined labels (key/value metadata) as set on the daemon. <p><br /></p> > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, > set through the daemon configuration, and _node_ labels, set from a > manager node in the Swarm. Node labels are not included in this > field. Node labels can be retrieved using the `/nodes/(id)` endpoint > on a manager node in the Swarm. type: "array" items: type: "string" example: ["storage=ssd", "production"] ExperimentalBuild: description: | Indicates if experimental features are enabled on the daemon. type: "boolean" example: true ServerVersion: description: | Version string of the daemon. > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) > returns the Swarm version instead of the daemon version, for example > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: description: | URL of the distributed storage backend. The storage backend is used for multihost networking (to store network and endpoint information) and by the node discovery mechanism. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "consul://consul.corp.example.com:8600/some/path" ClusterAdvertise: description: | The network endpoint that the Engine advertises for the purpose of node discovery. ClusterAdvertise is a `host:port` combination on which the daemon is reachable by other hosts. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "node5.corp.example.com:8000" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) runtimes configured on the daemon. Keys hold the "name" used to reference the runtime. The Docker daemon relies on an OCI compliant runtime (invoked via the `containerd` daemon) as its interface to the Linux kernel namespaces, cgroups, and SELinux. The default runtime is `runc`, and automatically configured. Additional runtimes can be configured by the user and will be listed here. type: "object" additionalProperties: $ref: "#/definitions/Runtime" default: runc: path: "runc" example: runc: path: "runc" runc-master: path: "/go/bin/runc" custom: path: "/usr/local/bin/my-oci-runtime" runtimeArgs: ["--debug", "--systemd-cgroup=false"] DefaultRuntime: description: | Name of the default OCI runtime that is used when starting containers. The default can be overridden per-container at create time. type: "string" default: "runc" example: "runc" Swarm: $ref: "#/definitions/SwarmInfo" LiveRestoreEnabled: description: | Indicates if live restore is enabled. If enabled, containers are kept running when the daemon is shutdown or upon daemon start if running containers are detected. type: "boolean" default: false example: false Isolation: description: | Represents the isolation technology to use as a default for containers. The supported values are platform-specific. 
If no isolation value is specified on daemon start, on Windows client, the default is `hyperv`, and on Windows server, the default is `process`. This option is currently not used on other platforms. default: "default" type: "string" enum: - "default" - "hyperv" - "process" InitBinary: description: | Name and, optional, path of the `docker-init` binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "docker-init" ContainerdCommit: $ref: "#/definitions/Commit" RuncCommit: $ref: "#/definitions/Commit" InitCommit: $ref: "#/definitions/Commit" SecurityOptions: description: | List of security features that are enabled on the daemon, such as apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value pairs. type: "array" items: type: "string" example: - "name=apparmor" - "name=seccomp,profile=default" - "name=selinux" - "name=userns" - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. If a commercial license has been applied to the daemon, information such as number of nodes, and expiration are included. type: "string" example: "Community Engine" DefaultAddressPools: description: | List of custom default address pools for local networks, which can be specified in the daemon.json file or dockerd option. Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 10.10.[0-255].0/24 address pools. type: "array" items: type: "object" properties: Base: description: "The network address in CIDR format" type: "string" example: "10.10.0.0/16" Size: description: "The network pool size" type: "integer" example: "24" Warnings: description: | List of warnings / informational messages about missing features, or issues related to the daemon configuration. These messages can be printed by the client as information to the user. type: "array" items: type: "string" example: - "WARNING: No memory limit support" - "WARNING: bridge-nf-call-iptables is disabled" - "WARNING: bridge-nf-call-ip6tables is disabled" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: description: | Available plugins per type. <p><br /></p> > **Note**: Only unmanaged (V1) plugins are included in this list. > V1 plugins are "lazily" loaded, and are not returned in this list > if there is no resource using the plugin. type: "object" properties: Volume: description: "Names of available volume-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["local"] Network: description: "Names of available network-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] Authorization: description: "Names of available authorization plugins." type: "array" items: type: "string" example: ["img-authz-plugin", "hbm"] Log: description: "Names of available logging-drivers, and logging-driver plugins." type: "array" items: type: "string" example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] RegistryServiceConfig: description: | RegistryServiceConfig stores daemon registry services configuration. 
type: "object" x-nullable: true properties: AllowNondistributableArtifactsCIDRs: description: | List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior, and enables the daemon to push nondistributable artifacts to all registries whose resolved IP address is within the subnet described by the CIDR syntax. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior for the specified registries. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. By default, local registries (`127.0.0.0/8`) are configured as insecure. All other registries are secure. Communicating with an insecure registry is not possible if the daemon assumes that registry is secure. This configuration override this behavior, insecure communication with registries whose resolved IP address is within the subnet described by the CIDR syntax. Registries can also be marked insecure by hostname. Those registries are listed under `IndexConfigs` and have their `Secure` field set to `false`. > **Warning**: Using this option can be useful when running a local > registry, but introduces security vulnerabilities. This option > should therefore ONLY be used for testing purposes. For increased > security, users should add their CA to their system's list of trusted > CAs instead of enabling this option. 
type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] IndexConfigs: type: "object" additionalProperties: $ref: "#/definitions/IndexInfo" example: "127.0.0.1:5000": "Name": "127.0.0.1:5000" "Mirrors": [] "Secure": false "Official": false "[2001:db8:a0b:12f0::1]:80": "Name": "[2001:db8:a0b:12f0::1]:80" "Mirrors": [] "Secure": false "Official": false "docker.io": Name: "docker.io" Mirrors: ["https://hub-mirror.corp.example.com:5000/"] Secure: true Official: true "registry.internal.corp.example.com:3000": Name: "registry.internal.corp.example.com:3000" Mirrors: [] Secure: false Official: false Mirrors: description: | List of registry URLs that act as a mirror for the official (`docker.io`) registry. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://[2001:db8:a0b:12f0::1]/" IndexInfo: description: IndexInfo contains information about a registry. type: "object" x-nullable: true properties: Name: description: | Name of the registry, such as "docker.io". type: "string" example: "docker.io" Mirrors: description: | List of mirrors, expressed as URIs. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://registry-2.docker.io/" - "https://registry-3.docker.io/" Secure: description: | Indicates if the registry is part of the list of insecure registries. If `false`, the registry is insecure. Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. > **Warning**: Insecure registries can be useful when running a local > registry. However, because its use creates security vulnerabilities > it should ONLY be enabled for testing purposes. For increased > security, users should add their CA to their system's list of > trusted CAs instead of enabling this option. type: "boolean" example: true Official: description: | Indicates whether this is an official registry (i.e., Docker Hub / docker.io) type: "boolean" example: true Runtime: description: | Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) runtime. The runtime is invoked by the daemon via the `containerd` daemon. OCI runtimes act as an interface to the Linux kernel namespaces, cgroups, and SELinux. type: "object" properties: path: description: | Name and, optional, path, of the OCI executable binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "/usr/local/bin/my-oci-runtime" runtimeArgs: description: | List of command-line arguments to pass to the runtime when invoked. type: "array" x-nullable: true items: type: "string" example: ["--debug", "--systemd-cgroup=false"] Commit: description: | Commit holds the Git-commit (SHA1) that a binary was built from, as reported in the version-string of external tools, such as `containerd`, or `runC`. type: "object" properties: ID: description: "Actual commit ID of external tool." type: "string" example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" Expected: description: | Commit ID of external tool expected by dockerd as set at build time. type: "string" example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" SwarmInfo: description: | Represents generic information about swarm. type: "object" properties: NodeID: description: "Unique identifier of for this node in the swarm." 
type: "string" default: "" example: "k67qz4598weg5unwwffg6z1m1" NodeAddr: description: | IP address at which this node can be reached by other nodes in the swarm. type: "string" default: "" example: "10.0.0.46" LocalNodeState: $ref: "#/definitions/LocalNodeState" ControlAvailable: type: "boolean" default: false example: true Error: type: "string" default: "" RemoteManagers: description: | List of ID's and addresses of other managers in the swarm. type: "array" default: null x-nullable: true items: $ref: "#/definitions/PeerNode" example: - NodeID: "71izy0goik036k48jg985xnds" Addr: "10.0.0.158:2377" - NodeID: "79y6h1o4gv8n120drcprv5nmc" Addr: "10.0.0.159:2377" - NodeID: "k67qz4598weg5unwwffg6z1m1" Addr: "10.0.0.46:2377" Nodes: description: "Total number of nodes in the swarm." type: "integer" x-nullable: true example: 4 Managers: description: "Total number of managers in the swarm." type: "integer" x-nullable: true example: 3 Cluster: $ref: "#/definitions/ClusterInfo" LocalNodeState: description: "Current local status of this node." type: "string" default: "" enum: - "" - "inactive" - "pending" - "active" - "error" - "locked" example: "active" PeerNode: description: "Represents a peer-node in the swarm" properties: NodeID: description: "Unique identifier of for this node in the swarm." type: "string" Addr: description: | IP address and ports at which this node can be reached. type: "string" NetworkAttachmentConfig: description: | Specifies how a service should be attached to a particular network. type: "object" properties: Target: description: | The target network for attachment. Must be a network name or ID. type: "string" Aliases: description: | Discoverable alternate names for the service on this network. type: "array" items: type: "string" DriverOpts: description: | Driver attachment options for the network target. type: "object" additionalProperties: type: "string" paths: /containers/json: get: summary: "List containers" description: | Returns a list of containers. For details on the format, see the [inspect endpoint](#operation/ContainerInspect). Note that it uses a different, smaller representation of a container than inspecting a single container. For example, the list of linked containers is not propagated . operationId: "ContainerList" produces: - "application/json" parameters: - name: "all" in: "query" description: | Return all containers. By default, only running containers are shown. type: "boolean" default: false - name: "limit" in: "query" description: | Return this number of most recently created containers, including non-running ones. type: "integer" - name: "size" in: "query" description: | Return the size of container as fields `SizeRw` and `SizeRootFs`. type: "boolean" default: false - name: "filters" in: "query" description: | Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
Available filters: - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) - `before`=(`<container id>` or `<container name>`) - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `exited=<int>` containers with exit code of `<int>` - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - `id=<ID>` a container's ID - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - `is-task=`(`true`|`false`) - `label=key` or `label="key=value"` of a container label - `name=<name>` a container's name - `network`=(`<network id>` or `<network name>`) - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `since`=(`<container id>` or `<container name>`) - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - `volume`=(`<volume name>` or `<mount point destination>`) type: "string" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/ContainerSummary" examples: application/json: - Id: "8dfafdbc3a40" Names: - "/boring_feynman" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 1" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: - PrivatePort: 2222 PublicPort: 3333 Type: "tcp" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:02" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" - Id: "9cd87474be90" Names: - "/coolName" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 222222" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" Gateway: "172.17.0.1" IPAddress: "172.17.0.8" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:08" Mounts: [] - Id: "3176a2479c92" Names: - "/sleepy_dog" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 3333333333333333" Created: 1367854154 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" Gateway: "172.17.0.1" IPAddress: "172.17.0.6" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:06" Mounts: [] - Id: "4cb07b47f9fb" Names: - "/running_cat" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 444444444444444444444444444444444" Created: 1367854152 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" 
NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" Gateway: "172.17.0.1" IPAddress: "172.17.0.5" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:05" Mounts: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/create: post: summary: "Create a container" operationId: "ContainerCreate" consumes: - "application/json" - "application/octet-stream" produces: - "application/json" parameters: - name: "name" in: "query" description: | Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. type: "string" pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" schema: allOf: - $ref: "#/definitions/ContainerConfig" - type: "object" properties: HostConfig: $ref: "#/definitions/HostConfig" NetworkingConfig: $ref: "#/definitions/NetworkingConfig" example: Hostname: "" Domainname: "" User: "" AttachStdin: false AttachStdout: true AttachStderr: true Tty: false OpenStdin: false StdinOnce: false Env: - "FOO=bar" - "BAZ=quux" Cmd: - "date" Entrypoint: "" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" Volumes: /volumes/data: {} WorkingDir: "" NetworkDisabled: false MacAddress: "12:34:56:78:9a:bc" ExposedPorts: 22/tcp: {} StopSignal: "SIGTERM" StopTimeout: 10 HostConfig: Binds: - "/tmp:/tmp" Links: - "redis3:redis" Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpuQuota: 50000 CpusetCpus: "0,1" CpusetMems: "0,1" MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 300 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceWriteIOps: - {} DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" MemorySwappiness: 60 OomKillDisable: false OomScoreAdj: 500 PidMode: "" PidsLimit: 0 PortBindings: 22/tcp: - HostPort: "11022" PublishAllPorts: false Privileged: false ReadonlyRootfs: false Dns: - "8.8.8.8" DnsOptions: - "" DnsSearch: - "" VolumesFrom: - "parent" - "other:ro" CapAdd: - "NET_ADMIN" CapDrop: - "MKNOD" GroupAdd: - "newgroup" RestartPolicy: Name: "" MaximumRetryCount: 0 AutoRemove: true NetworkMode: "bridge" Devices: [] Ulimits: - {} LogConfig: Type: "json-file" Config: {} SecurityOpt: [] StorageOpt: {} CgroupParent: "" VolumeDriver: "" ShmSize: 67108864 NetworkingConfig: EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" required: true responses: 201: description: "Container created successfully" schema: type: "object" title: "ContainerCreateResponse" description: "OK response to ContainerCreate operation" required: [Id, Warnings] properties: Id: description: "The ID of the created container" type: "string" x-nullable: false Warnings: description: "Warnings encountered when creating the container" type: "array" x-nullable: false items: type: "string" 
examples: application/json: Id: "e90e34656806" Warnings: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/{id}/json: get: summary: "Inspect a container" description: "Return low-level information about a container." operationId: "ContainerInspect" produces: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "ContainerInspectResponse" properties: Id: description: "The ID of the container" type: "string" Created: description: "The time the container was created" type: "string" Path: description: "The path to the command being run" type: "string" Args: description: "The arguments to the command being run" type: "array" items: type: "string" State: x-nullable: true $ref: "#/definitions/ContainerState" Image: description: "The container's image ID" type: "string" ResolvConfPath: type: "string" HostnamePath: type: "string" HostsPath: type: "string" LogPath: type: "string" Name: type: "string" RestartCount: type: "integer" Driver: type: "string" Platform: type: "string" MountLabel: type: "string" ProcessLabel: type: "string" AppArmorProfile: type: "string" ExecIDs: description: "IDs of exec instances that are running in the container." type: "array" items: type: "string" x-nullable: true HostConfig: $ref: "#/definitions/HostConfig" GraphDriver: $ref: "#/definitions/GraphDriverData" SizeRw: description: | The size of files that have been created or changed by this container. type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container." 
type: "integer" format: "int64" Mounts: type: "array" items: $ref: "#/definitions/MountPoint" Config: $ref: "#/definitions/ContainerConfig" NetworkSettings: $ref: "#/definitions/NetworkSettings" examples: application/json: AppArmorProfile: "" Args: - "-c" - "exit 9" Config: AttachStderr: true AttachStdin: false AttachStdout: true Cmd: - "/bin/sh" - "-c" - "exit 9" Domainname: "" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Healthcheck: Test: ["CMD-SHELL", "exit 0"] Hostname: "ba033ac44011" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" MacAddress: "" NetworkDisabled: false OpenStdin: false StdinOnce: false Tty: false User: "" Volumes: /volumes/data: {} WorkingDir: "" StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" Driver: "devicemapper" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 0 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteIOps: - {} ContainerIDFile: "" CpusetCpus: "" CpusetMems: "" CpuPercent: 80 CpuShares: 0 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 Devices: [] DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" IpcMode: "" LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" PidMode: "" PortBindings: {} Privileged: false ReadonlyRootfs: false PublishAllPorts: false RestartPolicy: MaximumRetryCount: 2 Name: "on-failure" LogConfig: Type: "json-file" Sysctls: net.ipv4.ip_forward: "1" Ulimits: - {} VolumeDriver: "" ShmSize: 67108864 HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" MountLabel: "" Name: "/boring_euclid" NetworkSettings: Bridge: "" SandboxID: "" HairpinMode: false LinkLocalIPv6Address: "" LinkLocalIPv6PrefixLen: 0 SandboxKey: "" EndpointID: "" Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 IPAddress: "" IPPrefixLen: 0 IPv6Gateway: "" MacAddress: "" Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Path: "/bin/sh" ProcessLabel: "" ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" RestartCount: 1 State: Error: "" ExitCode: 9 FinishedAt: "2015-01-06T15:47:32.080254511Z" Health: Status: "healthy" FailingStreak: 0 Log: - Start: "2019-12-22T10:59:05.6385933Z" End: "2019-12-22T10:59:05.8078452Z" ExitCode: 0 Output: "" OOMKilled: 
false Dead: false Paused: false Pid: 0 Restarting: false Running: true StartedAt: "2015-01-06T15:47:32.072697474Z" Status: "running" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "size" in: "query" type: "boolean" default: false description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" tags: ["Container"] /containers/{id}/top: get: summary: "List processes running inside a container" description: | On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows. operationId: "ContainerTop" responses: 200: description: "no error" schema: type: "object" title: "ContainerTopResponse" description: "OK response to ContainerTop operation" properties: Titles: description: "The ps column titles" type: "array" items: type: "string" Processes: description: | Each process running in the container, where each is process is an array of values corresponding to the titles. type: "array" items: type: "array" items: type: "string" examples: application/json: Titles: - "UID" - "PID" - "PPID" - "C" - "STIME" - "TTY" - "TIME" - "CMD" Processes: - - "root" - "13642" - "882" - "0" - "17:03" - "pts/0" - "00:00:00" - "/bin/bash" - - "root" - "13735" - "13642" - "0" - "17:06" - "pts/0" - "00:00:00" - "sleep 10" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "ps_args" in: "query" description: "The arguments to pass to `ps`. For example, `aux`" type: "string" default: "-ef" tags: ["Container"] /containers/{id}/logs: get: summary: "Get container logs" description: | Get `stdout` and `stderr` logs from a container. Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. operationId: "ContainerLogs" responses: 200: description: | logs returned as a stream in response body. For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not set Content-Type. schema: type: "string" format: "binary" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "until" in: "query" description: "Only return logs before this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Container"] /containers/{id}/changes: get: summary: "Get changes on a container’s filesystem" description: | Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: - `0`: Modified - `1`: Added - `2`: Deleted operationId: "ContainerChanges" produces: ["application/json"] responses: 200: description: "The list of changes" schema: type: "array" items: type: "object" x-go-name: "ContainerChangeResponseItem" title: "ContainerChangeResponseItem" description: "change item in response to ContainerChanges operation" required: [Path, Kind] properties: Path: description: "Path to file that has changed" type: "string" x-nullable: false Kind: description: "Kind of change" type: "integer" format: "uint8" enum: [0, 1, 2] x-nullable: false examples: application/json: - Path: "/dev" Kind: 0 - Path: "/dev/kmsg" Kind: 1 - Path: "/test" Kind: 1 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/export: get: summary: "Export a container" description: "Export the contents of a container as a tarball." operationId: "ContainerExport" produces: - "application/octet-stream" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/stats: get: summary: "Get container stats based on resource usage" description: | This endpoint returns a live stream of a container’s resource usage statistics. The `precpu_stats` is the CPU statistic of the *previous* read, and is used to calculate the CPU usage percentage. It is not an exact copy of the `cpu_stats` field. If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. On a cgroup v2 host, the following fields are not set * `blkio_stats`: all fields other than `io_service_bytes_recursive` * `cpu_stats`: `cpu_usage.percpu_usage` * `memory_stats`: `max_usage` and `failcnt` Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: * used_memory = `memory_stats.usage - memory_stats.stats.cache` * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] responses: 200: description: "no error" schema: type: "object" examples: application/json: read: "2015-01-08T22:57:31.547920715Z" pids_stats: current: 3 networks: eth0: rx_bytes: 5338 rx_dropped: 0 rx_errors: 0 rx_packets: 36 tx_bytes: 648 tx_dropped: 0 tx_errors: 0 tx_packets: 8 eth5: rx_bytes: 4641 rx_dropped: 0 rx_errors: 0 rx_packets: 26 tx_bytes: 690 tx_dropped: 0 tx_errors: 0 tx_packets: 9 memory_stats: stats: total_pgmajfault: 0 cache: 0 mapped_file: 0 total_inactive_file: 0 pgpgout: 414 rss: 6537216 total_mapped_file: 0 writeback: 0 unevictable: 0 pgpgin: 477 total_unevictable: 0 pgmajfault: 0 total_rss: 6537216 total_rss_huge: 6291456 total_writeback: 0 total_inactive_anon: 0 rss_huge: 6291456 hierarchical_memory_limit: 67108864 total_pgfault: 964 total_active_file: 0 active_anon: 6537216 total_active_anon: 6537216 total_pgpgout: 414 total_cache: 0 inactive_anon: 0 active_file: 0 pgfault: 964 inactive_file: 0 total_pgpgin: 477 max_usage: 6651904 usage: 6537216 failcnt: 0 limit: 67108864 blkio_stats: {} cpu_stats: cpu_usage: percpu_usage: - 8646879 - 24472255 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100215355 usage_in_kernelmode: 30000000 system_cpu_usage: 739306590000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 precpu_stats: cpu_usage: percpu_usage: - 8646879 - 24350896 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100093996 usage_in_kernelmode: 30000000 system_cpu_usage: 9492140000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "stream" in: "query" description: | Stream the output. If false, the stats will be output once and then it will disconnect. type: "boolean" default: true - name: "one-shot" in: "query" description: | Only get a single stat instead of waiting for 2 cycles. Must be used with `stream=false`. type: "boolean" default: false tags: ["Container"] /containers/{id}/resize: post: summary: "Resize a container TTY" description: "Resize the TTY for a container." 
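As an illustration of the usage formulas in the `ContainerStats` description above, here is a minimal Go sketch; it models only the JSON fields the formulas use, uses `len(PercpuUsage)` for the `length(cpu_stats.cpu_usage.percpu_usage)` term, and prefers `online_cpus` when it is set:

```go
// Minimal sketch of the CPU / memory usage formulas from the ContainerStats
// description above. Only the JSON fields used by the formulas are modelled.
package stats

type cpuUsage struct {
    TotalUsage  uint64   `json:"total_usage"`
    PercpuUsage []uint64 `json:"percpu_usage"`
}

type cpuStats struct {
    CPUUsage       cpuUsage `json:"cpu_usage"`
    SystemCPUUsage uint64   `json:"system_cpu_usage"`
    OnlineCPUs     uint32   `json:"online_cpus"`
}

type memoryStats struct {
    Usage uint64            `json:"usage"`
    Limit uint64            `json:"limit"`
    Stats map[string]uint64 `json:"stats"`
}

type statsJSON struct {
    CPUStats    cpuStats    `json:"cpu_stats"`
    PreCPUStats cpuStats    `json:"precpu_stats"`
    MemoryStats memoryStats `json:"memory_stats"`
}

// usagePercentages returns the CPU and memory usage percentages as shown by
// the docker CLI's stats command, following the formulas documented above.
func usagePercentages(s statsJSON) (cpuPct, memPct float64) {
    // used_memory = memory_stats.usage - memory_stats.stats.cache
    // (the "cache" key is absent on cgroup v2 hosts, in which case it is 0).
    usedMemory := float64(s.MemoryStats.Usage - s.MemoryStats.Stats["cache"])
    if s.MemoryStats.Limit > 0 {
        memPct = usedMemory / float64(s.MemoryStats.Limit) * 100.0
    }

    cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
    systemDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)

    // number_cpus = length(cpu_stats.cpu_usage.percpu_usage) or cpu_stats.online_cpus.
    numCPUs := float64(s.CPUStats.OnlineCPUs)
    if numCPUs == 0 {
        numCPUs = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
    }
    if systemDelta > 0 && numCPUs > 0 {
        cpuPct = cpuDelta / systemDelta * numCPUs * 100.0
    }
    return cpuPct, memPct
}
```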
operationId: "ContainerResize" consumes: - "application/octet-stream" produces: - "text/plain" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "cannot resize container" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] /containers/{id}/start: post: summary: "Start a container" operationId: "ContainerStart" responses: 204: description: "no error" 304: description: "container already started" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" tags: ["Container"] /containers/{id}/stop: post: summary: "Stop a container" operationId: "ContainerStop" responses: 204: description: "no error" 304: description: "container already stopped" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/restart: post: summary: "Restart a container" operationId: "ContainerRestart" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/kill: post: summary: "Kill a container" description: | Send a POSIX signal to a container, defaulting to killing to the container. 
operationId: "ContainerKill" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is not running" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "signal" in: "query" description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" type: "string" default: "SIGKILL" tags: ["Container"] /containers/{id}/update: post: summary: "Update a container" description: | Change various configuration options of a container without having to recreate it. operationId: "ContainerUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "The container has been updated." schema: type: "object" title: "ContainerUpdateResponse" description: "OK response to ContainerUpdate operation" properties: Warnings: type: "array" items: type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "update" in: "body" required: true schema: allOf: - $ref: "#/definitions/Resources" - type: "object" properties: RestartPolicy: $ref: "#/definitions/RestartPolicy" example: BlkioWeight: 300 CpuShares: 512 CpuPeriod: 100000 CpuQuota: 50000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpusetCpus: "0,1" CpusetMems: "0" Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" tags: ["Container"] /containers/{id}/rename: post: summary: "Rename a container" operationId: "ContainerRename" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "name already in use" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "name" in: "query" required: true description: "New name for the container" type: "string" tags: ["Container"] /containers/{id}/pause: post: summary: "Pause a container" description: | Use the freezer cgroup to suspend all processes in a container. Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
operationId: "ContainerPause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/unpause: post: summary: "Unpause a container" description: "Resume a container which has been paused." operationId: "ContainerUnpause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/attach: post: summary: "Attach to a container" description: | Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. ### Hijacking This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. This is the response from the daemon for an attach request: ``` HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream [STREAM] ``` After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. For example, the client sends this request to upgrade the connection: ``` POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 Upgrade: tcp Connection: Upgrade ``` The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp [STREAM] ``` ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: ```go header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} ``` `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. The simplest way to implement this protocol is the following: 1. Read 8 bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. 
### Stream format when using a TTY When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" - name: "logs" in: "query" description: | Replay previous logs from the container. This is useful for attaching to a container that has started and you want to output everything since the container started. If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. type: "boolean" default: false - name: "stream" in: "query" description: | Stream attached streams from the time the request was made onwards. type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/attach/ws: get: summary: "Attach to a container via a websocket" operationId: "ContainerAttachWebsocket" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`. type: "string" - name: "logs" in: "query" description: "Return logs" type: "boolean" default: false - name: "stream" in: "query" description: "Return stream" type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/wait: post: summary: "Wait for a container" description: "Block until a container stops, then returns the exit code." 
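        As an illustration (the container ID is a placeholder), waiting for a container and reading its exit code could look like this:

        ```
        POST /containers/e90e34656806/wait?condition=not-running HTTP/1.1
        ```

        Once the container stops, the response body carries the exit status, for example:

        ```json
        {"StatusCode": 0}
        ```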
operationId: "ContainerWait" produces: ["application/json"] responses: 200: description: "The container has exit." schema: type: "object" title: "ContainerWaitResponse" description: "OK response to ContainerWait operation" required: [StatusCode] properties: StatusCode: description: "Exit code of the container" type: "integer" x-nullable: false Error: description: "container waiting error, if any" type: "object" properties: Message: description: "Details of an error" type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "condition" in: "query" description: | Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'. type: "string" default: "not-running" tags: ["Container"] /containers/{id}: delete: summary: "Remove a container" operationId: "ContainerDelete" responses: 204: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: | You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "v" in: "query" description: "Remove anonymous volumes associated with the container." type: "boolean" default: false - name: "force" in: "query" description: "If the container is running, kill it before removing it." type: "boolean" default: false - name: "link" in: "query" description: "Remove the specified link associated with the container." type: "boolean" default: false tags: ["Container"] /containers/{id}/archive: head: summary: "Get information about files in a container" description: | A response header `X-Docker-Container-Path-Stat` is returned, containing a base64 - encoded JSON object with some filesystem header information about the path. operationId: "ContainerArchiveInfo" responses: 200: description: "no error" headers: X-Docker-Container-Path-Stat: type: "string" description: | A base64 - encoded JSON object with some filesystem header information about the path 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." 
type: "string" tags: ["Container"] get: summary: "Get an archive of a filesystem resource in a container" description: "Get a tar archive of a resource in the filesystem of container id." operationId: "ContainerArchive" produces: ["application/x-tar"] responses: 200: description: "no error" 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." type: "string" tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" description: "Upload a tar archive to be extracted to a path in the filesystem of container id." operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: 200: description: "The content was extracted successfully" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such container or path does not exist inside the container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Path to a directory in the container to extract the archive’s contents into. " type: "string" - name: "noOverwriteDirNonDir" in: "query" description: | If `1`, `true`, or `True` then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. type: "string" - name: "copyUIDGID" in: "query" description: | If `1`, `true`, then it will copy UID/GID maps to the dest file or dir type: "string" - name: "inputStream" in: "body" required: true description: | The input stream must be a tar archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, or `xz`. schema: type: "string" format: "binary" tags: ["Container"] /containers/prune: post: summary: "Delete stopped containers" produces: - "application/json" operationId: "ContainerPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ContainerPruneResponse" properties: ContainersDeleted: description: "Container IDs that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /images/json: get: summary: "List Images" description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." operationId: "ImageList" produces: - "application/json" responses: 200: description: "Summary image data for the images matching the query" schema: type: "array" items: $ref: "#/definitions/ImageSummary" examples: application/json: - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ParentId: "" RepoTags: - "ubuntu:12.04" - "ubuntu:precise" RepoDigests: - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" Created: 1474925151 Size: 103579269 VirtualSize: 103579269 SharedSize: 0 Labels: {} Containers: 2 - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" ParentId: "" RepoTags: - "ubuntu:12.10" - "ubuntu:quantal" RepoDigests: - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" Created: 1403128455 Size: 172064416 VirtualSize: 172064416 SharedSize: 0 Labels: {} Containers: 5 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "all" in: "query" description: "Show all images. Only images from a final layer (no children) are shown by default." type: "boolean" default: false - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - `dangling=true` - `label=key` or `label="key=value"` of an image label - `reference`=(`<image-name>[:<tag>]`) - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) type: "string" - name: "shared-size" in: "query" description: "Compute and show shared size as a `SharedSize` field on each image." type: "boolean" default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false tags: ["Image"] /build: post: summary: "Build an image" description: | Build an image from a tar archive with a `Dockerfile` in it. The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. The build is canceled if the client drops the connection by quitting or being killed. 
operationId: "ImageBuild" consumes: - "application/octet-stream" produces: - "application/json" parameters: - name: "inputStream" in: "body" description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." schema: type: "string" format: "binary" - name: "dockerfile" in: "query" description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." type: "string" default: "Dockerfile" - name: "t" in: "query" description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." type: "string" - name: "extrahosts" in: "query" description: "Extra hosts to add to /etc/hosts" type: "string" - name: "remote" in: "query" description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." type: "string" - name: "q" in: "query" description: "Suppress verbose build output." type: "boolean" default: false - name: "nocache" in: "query" description: "Do not use the cache when building the image." type: "boolean" default: false - name: "cachefrom" in: "query" description: "JSON array of images used for build cache resolution." type: "string" - name: "pull" in: "query" description: "Attempt to pull the image even if an older image exists locally." type: "string" - name: "rm" in: "query" description: "Remove intermediate containers after a successful build." type: "boolean" default: true - name: "forcerm" in: "query" description: "Always remove intermediate containers, even upon failure." type: "boolean" default: false - name: "memory" in: "query" description: "Set memory limit for build." type: "integer" - name: "memswap" in: "query" description: "Total memory (memory + swap). Set as `-1` to disable swap." type: "integer" - name: "cpushares" in: "query" description: "CPU shares (relative weight)." type: "integer" - name: "cpusetcpus" in: "query" description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." type: "string" - name: "cpuperiod" in: "query" description: "The length of a CPU period in microseconds." type: "integer" - name: "cpuquota" in: "query" description: "Microseconds of CPU time that the container can get in a CPU period." type: "integer" - name: "buildargs" in: "query" description: > JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) type: "string" - name: "shmsize" in: "query" description: "Size of `/dev/shm` in bytes. The size must be greater than 0. 
If omitted the system uses 64MB." type: "integer" - name: "squash" in: "query" description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" type: "boolean" - name: "labels" in: "query" description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." type: "string" - name: "networkmode" in: "query" description: | Sets the networking mode for the run commands during build. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name or ID to which this container should connect to. type: "string" - name: "Content-type" in: "header" type: "string" enum: - "application/x-tar" default: "application/x-tar" - name: "X-Registry-Config" in: "header" description: | This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: ``` { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } ``` Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" - name: "target" in: "query" description: "Target build stage" type: "string" default: "" - name: "outputs" in: "query" description: "BuildKit output configuration" type: "string" default: "" responses: 200: description: "no error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /build/prune: post: summary: "Delete builder cache" produces: - "application/json" operationId: "BuildPrune" parameters: - name: "keep-storage" in: "query" description: "Amount of disk space in bytes to keep for cache" type: "integer" format: "int64" - name: "all" in: "query" type: "boolean" description: "Remove all types of build cache" - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - `id=<id>` - `parent=<id>` - `type=<string>` - `description=<string>` - `inuse` - `shared` - `private` responses: 200: description: "No error" schema: type: "object" title: "BuildPruneResponse" properties: CachesDeleted: type: "array" items: description: "ID of build cache object" type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /images/create: post: summary: "Create an image" description: "Create an image by either pulling it from a registry or importing it." 
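        For example, pulling an image by name could look like the following (the image name is a placeholder); the response is a stream of JSON progress messages. Pulling from a private registry additionally requires the `X-Registry-Auth` header described below.

        ```
        POST /images/create?fromImage=busybox&tag=latest HTTP/1.1
        ```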
operationId: "ImageCreate" consumes: - "text/plain" - "application/octet-stream" produces: - "application/json" responses: 200: description: "no error" 404: description: "repository does not exist or no read access" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "fromImage" in: "query" description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." type: "string" - name: "fromSrc" in: "query" description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." type: "string" - name: "repo" in: "query" description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." type: "string" - name: "tag" in: "query" description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." type: "string" - name: "message" in: "query" description: "Set commit message for imported image." type: "string" - name: "inputImage" in: "body" description: "Image content if the value `-` has been specified in fromSrc query parameter" schema: type: "string" required: false - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "changes" in: "query" description: | Apply `Dockerfile` instructions to the image that is created, for example: `changes=ENV DEBUG=true`. Note that `ENV DEBUG=true` should be URI component encoded. Supported `Dockerfile` instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` type: "array" items: type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" tags: ["Image"] /images/{name}/json: get: summary: "Inspect an image" description: "Return low-level information about an image." 
operationId: "ImageInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Image" examples: application/json: Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" Comment: "" Os: "linux" Architecture: "amd64" Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" ContainerConfig: Tty: false Hostname: "e611e15f9c9d" Domainname: "" AttachStdout: false PublishService: "" AttachStdin: false OpenStdin: false StdinOnce: false NetworkDisabled: false OnBuild: [] Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" User: "" WorkingDir: "" MacAddress: "" AttachStderr: false Labels: com.example.license: "GPL" com.example.version: "1.0" com.example.vendor: "Acme" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: - "/bin/sh" - "-c" - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" DockerVersion: "1.9.0-dev" VirtualSize: 188359297 Size: 0 Author: "" Created: "2015-09-10T08:30:53.26995814Z" GraphDriver: Name: "aufs" Data: {} RepoDigests: - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" RepoTags: - "example:1.0" - "example:latest" - "example:stable" Config: Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" NetworkDisabled: false OnBuild: [] StdinOnce: false PublishService: "" AttachStdin: false OpenStdin: false Domainname: "" AttachStdout: false Tty: false Hostname: "e611e15f9c9d" Cmd: - "/bin/bash" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Labels: com.example.vendor: "Acme" com.example.version: "1.0" com.example.license: "GPL" MacAddress: "" AttachStderr: false WorkingDir: "" User: "" RootFS: Type: "layers" Layers: - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Image"] /images/{name}/history: get: summary: "Get the history of an image" description: "Return parent layers of an image." 
operationId: "ImageHistory" produces: ["application/json"] responses: 200: description: "List of image layers" schema: type: "array" items: type: "object" x-go-name: HistoryResponseItem title: "HistoryResponseItem" description: "individual image layer information in response to ImageHistory operation" required: [Id, Created, CreatedBy, Tags, Size, Comment] properties: Id: type: "string" x-nullable: false Created: type: "integer" format: "int64" x-nullable: false CreatedBy: type: "string" x-nullable: false Tags: type: "array" items: type: "string" Size: type: "integer" format: "int64" x-nullable: false Comment: type: "string" x-nullable: false examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" Created: 1398108230 CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" Tags: - "ubuntu:lucid" - "ubuntu:10.04" Size: 182964289 Comment: "" - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" Created: 1398108222 CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <[email protected]> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" Tags: [] Size: 0 Comment: "" - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" Created: 1371157430 CreatedBy: "" Tags: - "scratch12:latest" - "scratch:latest" Size: 0 Comment: "Imported from -" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/{name}/push: post: summary: "Push an image" description: | Push an image to a registry. If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. The push is cancelled if the HTTP connection is closed. operationId: "ImagePush" consumes: - "application/octet-stream" responses: 200: description: "No error" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID." type: "string" required: true - name: "tag" in: "query" description: "The tag to associate with the image on the registry." type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" required: true tags: ["Image"] /images/{name}/tag: post: summary: "Tag an image" description: "Tag an image so that it becomes part of a repository." operationId: "ImageTag" responses: 201: description: "No error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID to tag." type: "string" required: true - name: "repo" in: "query" description: "The repository to tag in. For example, `someuser/someimage`." type: "string" - name: "tag" in: "query" description: "The name of the new tag." 
type: "string" tags: ["Image"] /images/{name}: delete: summary: "Remove an image" description: | Remove an image, along with any untagged parent images that were referenced by that image. Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. operationId: "ImageDelete" produces: ["application/json"] responses: 200: description: "The image was deleted successfully" schema: type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" examples: application/json: - Untagged: "3e2f21a89f" - Deleted: "3e2f21a89f" - Deleted: "53b4f83ac9" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true - name: "force" in: "query" description: "Remove the image even if it is being used by stopped containers or has other tags" type: "boolean" default: false - name: "noprune" in: "query" description: "Do not delete untagged parent images" type: "boolean" default: false tags: ["Image"] /images/search: get: summary: "Search images" description: "Search for an image on Docker Hub." operationId: "ImageSearch" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: type: "object" title: "ImageSearchResponseItem" properties: description: type: "string" is_official: type: "boolean" is_automated: type: "boolean" name: type: "string" star_count: type: "integer" examples: application/json: - description: "" is_official: false is_automated: false name: "wma55/u1210sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "jdswinbank/sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "vgauthier/sshd" star_count: 0 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "term" in: "query" description: "Term to search" type: "string" required: true - name: "limit" in: "query" description: "Maximum number of results to return" type: "integer" - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `is-automated=(true|false)` - `is-official=(true|false)` - `stars=<number>` Matches images that has at least 'number' stars. type: "string" tags: ["Image"] /images/prune: post: summary: "Delete unused images" produces: - "application/json" operationId: "ImagePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `dangling=<boolean>` When set to `true` (or `1`), prune only unused *and* untagged images. When set to `false` (or `0`), all unused images are pruned. - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ImagePruneResponse" properties: ImagesDeleted: description: "Images that were deleted" type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /auth: post: summary: "Check auth configuration" description: | Validate credentials for a registry and, if available, get an identity token for accessing the registry without password. operationId: "SystemAuth" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "An identity token was generated successfully." schema: type: "object" title: "SystemAuthResponse" required: [Status] properties: Status: description: "The status of the authentication" type: "string" x-nullable: false IdentityToken: description: "An opaque token used to authenticate a user after a successful login" type: "string" x-nullable: false examples: application/json: Status: "Login Succeeded" IdentityToken: "9cbaf023786cd7..." 204: description: "No error" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "authConfig" in: "body" description: "Authentication to check" schema: $ref: "#/definitions/AuthConfig" tags: ["System"] /info: get: summary: "Get system information" operationId: "SystemInfo" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/SystemInfo" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /version: get: summary: "Get version" description: "Returns the version of Docker that is running and various information about the system that Docker is running on." operationId: "SystemVersion" produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/SystemVersion" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /_ping: get: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." operationId: "SystemPing" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "OK" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" headers: Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" tags: ["System"] head: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." 
operationId: "SystemPingHead" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "(empty)" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /commit: post: summary: "Create a new image from a container" operationId: "ImageCommit" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "containerConfig" in: "body" description: "The container configuration" schema: $ref: "#/definitions/ContainerConfig" - name: "container" in: "query" description: "The ID or name of the container to commit" type: "string" - name: "repo" in: "query" description: "Repository name for the created image" type: "string" - name: "tag" in: "query" description: "Tag name for the create image" type: "string" - name: "comment" in: "query" description: "Commit message" type: "string" - name: "author" in: "query" description: "Author of the image (e.g., `John Hannibal Smith <[email protected]>`)" type: "string" - name: "pause" in: "query" description: "Whether to pause the container before committing" type: "boolean" default: true - name: "changes" in: "query" description: "`Dockerfile` instructions to apply while committing" type: "string" tags: ["Image"] /events: get: summary: "Monitor events" description: | Stream real-time events from the server. Various objects within Docker report events when something happens to them. 
Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` The Docker daemon reports these events: `reload` Services report these events: `create`, `update`, and `remove` Nodes report these events: `create`, `update`, and `remove` Secrets report these events: `create`, `update`, and `remove` Configs report these events: `create`, `update`, and `remove` The Builder reports `prune` events operationId: "SystemEvents" produces: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "SystemEventsResponse" properties: Type: description: "The type of object emitting the event" type: "string" Action: description: "The type of event" type: "string" Actor: type: "object" properties: ID: description: "The ID of the object emitting the event" type: "string" Attributes: description: "Various key/value attributes of the object, depending on its type" type: "object" additionalProperties: type: "string" time: description: "Timestamp of event" type: "integer" timeNano: description: "Timestamp of event, with nanosecond accuracy" type: "integer" format: "int64" examples: application/json: Type: "container" Action: "create" Actor: ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" Attributes: com.example.some-label: "some-label-value" image: "alpine" name: "my-container" time: 1461943101 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "since" in: "query" description: "Show events created since this timestamp then stream new events." type: "string" - name: "until" in: "query" description: "Show events created until this timestamp then stop streaming." type: "string" - name: "filters" in: "query" description: | A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: - `config=<string>` config name or ID - `container=<string>` container name or ID - `daemon=<string>` daemon name or ID - `event=<string>` event type - `image=<string>` image name or ID - `label=<string>` image or container label - `network=<string>` network name or ID - `node=<string>` node ID - `plugin`=<string> plugin name or ID - `scope`=<string> local or swarm - `secret=<string>` secret name or ID - `service=<string>` service name or ID - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - `volume=<string>` volume name type: "string" tags: ["System"] /system/df: get: summary: "Get data usage information" operationId: "SystemDataUsage" responses: 200: description: "no error" schema: type: "object" title: "SystemDataUsageResponse" properties: LayersSize: type: "integer" format: "int64" Images: type: "array" items: $ref: "#/definitions/ImageSummary" Containers: type: "array" items: $ref: "#/definitions/ContainerSummary" Volumes: type: "array" items: $ref: "#/definitions/Volume" BuildCache: type: "array" items: $ref: "#/definitions/BuildCache" example: LayersSize: 1092588 Images: - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" ParentId: "" RepoTags: - "busybox:latest" RepoDigests: - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" Created: 1466724217 Size: 1092588 SharedSize: 0 VirtualSize: 1092588 Labels: {} Containers: 1 Containers: - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" Names: - "/top" Image: "busybox" ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" Command: "top" Created: 1472592424 Ports: [] SizeRootFs: 1092588 Labels: {} State: "exited" Status: "Exited (0) 56 minutes ago" HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: IPAMConfig: null Links: null Aliases: null NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" Gateway: "172.18.0.1" IPAddress: "172.18.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Mounts: [] Volumes: - Name: "my-volume" Driver: "local" Mountpoint: "/var/lib/docker/volumes/my-volume/_data" Labels: null Scope: "local" Options: null UsageData: Size: 10920104 RefCount: 2 BuildCache: - ID: "hw53o5aio51xtltp5xjp8v7fx" Parent: "" Type: "regular" Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" InUse: false Shared: true Size: 0 CreatedAt: "2021-06-28T13:31:01.474619385Z" LastUsedAt: "2021-07-07T22:02:32.738075951Z" UsageCount: 26 - ID: "ndlpt0hhvkqcdfkputsk4cq9c" Parent: "hw53o5aio51xtltp5xjp8v7fx" Type: "regular" Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: false Shared: true Size: 51 CreatedAt: "2021-06-28T13:31:03.002625487Z" LastUsedAt: "2021-07-07T22:02:32.773909517Z" UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "type" in: "query" description: | Object types, for which to compute and return data. 
type: "array" collectionFormat: multi items: type: "string" enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: summary: "Export an image" description: | Get a tarball containing all images and metadata for a repository. If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ```json { "hello-world": { "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" } } ``` operationId: "ImageGet" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/get: get: summary: "Export several images" description: | Get a tarball containing all images and metadata for several image repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageGetAll" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "names" in: "query" description: "Image names to filter by" type: "array" items: type: "string" tags: ["Image"] /images/load: post: summary: "Import images" description: | Load a set of images and tags into a repository. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageLoad" consumes: - "application/x-tar" produces: - "application/json" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "imagesTarball" in: "body" description: "Tar archive containing images" schema: type: "string" format: "binary" - name: "quiet" in: "query" description: "Suppress progress details during load." type: "boolean" default: false tags: ["Image"] /containers/{id}/exec: post: summary: "Create an exec instance" description: "Run a command inside a running container." 
operationId: "ContainerExec" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is paused" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execConfig" in: "body" description: "Exec configuration" schema: type: "object" title: "ExecConfig" properties: AttachStdin: type: "boolean" description: "Attach to `stdin` of the exec command." AttachStdout: type: "boolean" description: "Attach to `stdout` of the exec command." AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." DetachKeys: type: "string" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Tty: type: "boolean" description: "Allocate a pseudo-TTY." Env: description: | A list of environment variables in the form `["VAR=value", ...]`. type: "array" items: type: "string" Cmd: type: "array" description: "Command to run, as a string or array of strings." items: type: "string" Privileged: type: "boolean" description: "Runs the exec process with extended privileges." default: false User: type: "string" description: | The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`. WorkingDir: type: "string" description: | The working directory for the exec process inside the container. example: AttachStdin: false AttachStdout: true AttachStderr: true DetachKeys: "ctrl-p,ctrl-q" Tty: false Cmd: - "date" Env: - "FOO=bar" - "BAZ=quux" required: true - name: "id" in: "path" description: "ID or name of container" type: "string" required: true tags: ["Exec"] /exec/{id}/start: post: summary: "Start an exec instance" description: | Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command. operationId: "ExecStart" consumes: - "application/json" produces: - "application/vnd.docker.raw-stream" responses: 200: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Container is stopped or paused" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execStartConfig" in: "body" schema: type: "object" title: "ExecStartConfig" properties: Detach: type: "boolean" description: "Detach from the command." Tty: type: "boolean" description: "Allocate a pseudo-TTY." example: Detach: false Tty: false - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /exec/{id}/resize: post: summary: "Resize an exec instance" description: | Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance. 
operationId: "ExecResize" responses: 201: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] /exec/{id}/json: get: summary: "Inspect an exec instance" description: "Return low-level information about an exec instance." operationId: "ExecInspect" produces: - "application/json" responses: 200: description: "No error" schema: type: "object" title: "ExecInspectResponse" properties: CanRemove: type: "boolean" DetachKeys: type: "string" ID: type: "string" Running: type: "boolean" ExitCode: type: "integer" ProcessConfig: $ref: "#/definitions/ProcessConfig" OpenStdin: type: "boolean" OpenStderr: type: "boolean" OpenStdout: type: "boolean" ContainerID: type: "string" Pid: type: "integer" description: "The system process ID for the exec process." examples: application/json: CanRemove: false ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" DetachKeys: "" ExitCode: 2 ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" OpenStderr: true OpenStdin: true OpenStdout: true ProcessConfig: arguments: - "-c" - "exit 2" entrypoint: "sh" privileged: false tty: true user: "1000" Running: false Pid: 42000 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /volumes: get: summary: "List volumes" operationId: "VolumeList" produces: ["application/json"] responses: 200: description: "Summary volume data that matches the query" schema: type: "object" title: "VolumeListResponse" description: "Volume list response" required: [Volumes, Warnings] properties: Volumes: type: "array" x-nullable: false description: "List of volumes" items: $ref: "#/definitions/Volume" Warnings: type: "array" x-nullable: false description: | Warnings that occurred when fetching the list of volumes. items: type: "string" examples: application/json: Volumes: - CreatedAt: "2017-07-19T12:00:26Z" Name: "tardis" Driver: "local" Mountpoint: "/var/lib/docker/volumes/tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" Options: device: "tmpfs" o: "size=100m,uid=1000" type: "tmpfs" Warnings: [] 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are not in use by a container. When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. - `driver=<volume-driver-name>` Matches volumes based on their driver. - `label=<key>` or `label=<key>:<value>` Matches volumes based on the presence of a `label` alone or a `label` and a value. - `name=<volume-name>` Matches all or part of a volume name. 
type: "string" format: "json" tags: ["Volume"] /volumes/create: post: summary: "Create a volume" operationId: "VolumeCreate" consumes: ["application/json"] produces: ["application/json"] responses: 201: description: "The volume was created successfully" schema: $ref: "#/definitions/Volume" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "volumeConfig" in: "body" required: true description: "Volume configuration" schema: type: "object" description: "Volume configuration" title: "VolumeConfig" properties: Name: description: | The new volume's name. If not specified, Docker generates a name. type: "string" x-nullable: false Driver: description: "Name of the volume driver to use." type: "string" default: "local" x-nullable: false DriverOpts: description: | A mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: Name: "tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Driver: "custom" tags: ["Volume"] /volumes/{name}: get: summary: "Inspect a volume" operationId: "VolumeInspect" produces: ["application/json"] responses: 200: description: "No error" schema: $ref: "#/definitions/Volume" 404: description: "No such volume" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" tags: ["Volume"] delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." operationId: "VolumeDelete" responses: 204: description: "The volume was removed" 404: description: "No such volume or volume driver" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Volume is in use and cannot be removed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" - name: "force" in: "query" description: "Force the removal of the volume" type: "boolean" default: false tags: ["Volume"] /volumes/prune: post: summary: "Delete unused volumes" produces: - "application/json" operationId: "VolumePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: description: "No error" schema: type: "object" title: "VolumePruneResponse" properties: VolumesDeleted: description: "Volumes that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Volume"] /networks: get: summary: "List networks" description: | Returns a list of networks. For details on the format, see the [network inspect endpoint](#operation/NetworkInspect). Note that it uses a different, smaller representation of a network than inspecting a single network. 
For example, the list of containers attached to the network is not propagated in API versions 1.28 and up. operationId: "NetworkList" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Network" examples: application/json: - Name: "bridge" Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" Created: "2016-10-19T06:21:00.416543526Z" Scope: "local" Driver: "bridge" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: - Subnet: "172.17.0.0/16" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" - Name: "none" Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "null" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} - Name: "host" Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "host" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all networks that are not in use by a container. When set to `false` (or `0`), only networks that are in use by one or more containers are returned. - `driver=<driver-name>` Matches a network's driver. - `id=<network-id>` Matches all or part of a network ID. - `label=<key>` or `label=<key>=<value>` of a network label. - `name=<network-name>` Matches all or part of a network name. - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
type: "string" tags: ["Network"] /networks/{id}: get: summary: "Inspect a network" operationId: "NetworkInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Network" 404: description: "Network not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "verbose" in: "query" description: "Detailed inspect output for troubleshooting" type: "boolean" default: false - name: "scope" in: "query" description: "Filter the network by scope (swarm, global, or local)" type: "string" tags: ["Network"] delete: summary: "Remove a network" operationId: "NetworkDelete" responses: 204: description: "No error" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such network" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" tags: ["Network"] /networks/create: post: summary: "Create a network" operationId: "NetworkCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "No error" schema: type: "object" title: "NetworkCreateResponse" properties: Id: description: "The ID of the created network." type: "string" Warning: type: "string" example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "plugin not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "networkConfig" in: "body" description: "Network configuration" required: true schema: type: "object" title: "NetworkCreateRequest" required: ["Name"] properties: Name: description: "The network's name." type: "string" CheckDuplicate: description: | Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions. type: "boolean" Driver: description: "Name of the network driver plugin to use." type: "string" default: "bridge" Internal: description: "Restrict external access to the network." type: "boolean" Attachable: description: | Globally scoped network is manually attachable by regular containers from workers in swarm mode. type: "boolean" Ingress: description: | Ingress network is the network which provides the routing-mesh in swarm mode. type: "boolean" IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" Options: description: "Network specific options to be used by the drivers." type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: Name: "isolated_nw" CheckDuplicate: false Driver: "bridge" EnableIPv6: true IPAM: Driver: "default" Config: - Subnet: "172.20.0.0/16" IPRange: "172.20.10.0/24" Gateway: "172.20.10.11" - Subnet: "2001:db8:abcd::/64" Gateway: "2001:db8:abcd::1011" Options: foo: "bar" Internal: true Attachable: false Ingress: false Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" tags: ["Network"] /networks/{id}/connect: post: summary: "Connect a container to a network" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkConnectRequest" properties: Container: type: "string" description: "The ID or name of the container to connect to the network." EndpointConfig: $ref: "#/definitions/EndpointSettings" example: Container: "3613f73ba0e4" EndpointConfig: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" tags: ["Network"] /networks/{id}/disconnect: post: summary: "Disconnect a container from a network" operationId: "NetworkDisconnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkDisconnectRequest" properties: Container: type: "string" description: | The ID or name of the container to disconnect from the network. Force: type: "boolean" description: | Force the container to disconnect from the network. tags: ["Network"] /networks/prune: post: summary: "Delete unused networks" produces: - "application/json" operationId: "NetworkPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "NetworkPruneResponse" properties: NetworksDeleted: description: "Networks that were deleted" type: "array" items: type: "string" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Network"] /plugins: get: summary: "List plugins" operationId: "PluginList" description: "Returns information about installed plugins." produces: ["application/json"] responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Plugin" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - `capability=<capability name>` - `enable=<true>|<false>` tags: ["Plugin"] /plugins/privileges: get: summary: "Get plugin privileges" operationId: "GetPluginPrivileges" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: - "Plugin" /plugins/pull: post: summary: "Install a plugin" operationId: "PluginPull" description: | Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). produces: - "application/json" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | Remote reference for plugin to install. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "name" in: "query" description: | Local name for the pulled plugin. The `:latest` tag is optional, and is used as the default if omitted. required: false type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/{name}/json: get: summary: "Inspect a plugin" operationId: "PluginInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" tags: ["Plugin"] /plugins/{name}: delete: summary: "Remove a plugin" operationId: "PluginDelete" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "force" in: "query" description: | Disable the plugin before removing. This may result in issues if the plugin is in use by a container. type: "boolean" default: false tags: ["Plugin"] /plugins/{name}/enable: post: summary: "Enable a plugin" operationId: "PluginEnable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "timeout" in: "query" description: "Set the HTTP client timeout (in seconds)" type: "integer" default: 0 tags: ["Plugin"] /plugins/{name}/disable: post: summary: "Disable a plugin" operationId: "PluginDisable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: ["Plugin"] /plugins/{name}/upgrade: post: summary: "Upgrade a plugin" operationId: "PluginUpgrade" responses: 204: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "remote" in: "query" description: | Remote reference to upgrade to. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/create: post: summary: "Create a plugin" operationId: "PluginCreate" consumes: - "application/x-tar" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" - name: "tarContext" in: "body" description: "Path to tar containing plugin rootfs and manifest" schema: type: "string" format: "binary" tags: ["Plugin"] /plugins/{name}/push: post: summary: "Push a plugin" operationId: "PluginPush" description: | Push a plugin to the registry. parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" responses: 200: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /plugins/{name}/set: post: summary: "Configure a plugin" operationId: "PluginSet" consumes: - "application/json" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "body" in: "body" schema: type: "array" items: type: "string" example: ["DEBUG=1"] responses: 204: description: "No error" 404: description: "Plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /nodes: get: summary: "List nodes" operationId: "NodeList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Node" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). Available filters: - `id=<node id>` - `label=<engine label>` - `membership=`(`accepted`|`pending`)` - `name=<node name>` - `node.label=<node label>` - `role=`(`manager`|`worker`)` type: "string" tags: ["Node"] /nodes/{id}: get: summary: "Inspect a node" operationId: "NodeInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Node" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true tags: ["Node"] delete: summary: "Delete a node" operationId: "NodeDelete" responses: 200: description: "no error" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true - name: "force" in: "query" description: "Force remove a node from the swarm" default: false type: "boolean" tags: ["Node"] /nodes/{id}/update: post: summary: "Update a node" operationId: "NodeUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID of 
the node" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/NodeSpec" - name: "version" in: "query" description: | The version number of the node object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Node"] /swarm: get: summary: "Inspect swarm" operationId: "SwarmInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Swarm" 404: description: "no such swarm" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/init: post: summary: "Initialize a new swarm" operationId: "SwarmInit" produces: - "application/json" - "text/plain" responses: 200: description: "no error" schema: description: "The node ID" type: "string" example: "7v2t30z9blmxuhnyo6s4cpenp" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmInitRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used. type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. if no port is set or is set to 0, default port 4789 will be used. type: "integer" format: "uint32" DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" example: ["10.10.0.0/16", "20.20.0.0/16"] ForceNewCluster: description: "Force creation of a new swarm." type: "boolean" SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. 
type: "integer" format: "uint32" Spec: $ref: "#/definitions/SwarmSpec" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" DataPathPort: 4789 DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] SubnetSize: 24 ForceNewCluster: false Spec: Orchestration: {} Raft: {} Dispatcher: {} CAConfig: {} EncryptionConfig: AutoLockManagers: false tags: ["Swarm"] /swarm/join: post: summary: "Join an existing swarm" operationId: "SwarmJoin" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmJoinRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" RemoteAddrs: description: | Addresses of manager nodes already participating in the swarm. type: "array" items: type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" RemoteAddrs: - "node1:2377" JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" tags: ["Swarm"] /swarm/leave: post: summary: "Leave a swarm" operationId: "SwarmLeave" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "force" description: | Force leave swarm, even if this is the last manager or that it will break the cluster. in: "query" type: "boolean" default: false tags: ["Swarm"] /swarm/update: post: summary: "Update a swarm" operationId: "SwarmUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: $ref: "#/definitions/SwarmSpec" - name: "version" in: "query" description: | The version number of the swarm object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true - name: "rotateWorkerToken" in: "query" description: "Rotate the worker join token." type: "boolean" default: false - name: "rotateManagerToken" in: "query" description: "Rotate the manager join token." type: "boolean" default: false - name: "rotateManagerUnlockKey" in: "query" description: "Rotate the manager unlock key." type: "boolean" default: false tags: ["Swarm"] /swarm/unlockkey: get: summary: "Get the unlock key" operationId: "SwarmUnlockkey" consumes: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "UnlockKeyResponse" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/unlock: post: summary: "Unlock a locked manager" operationId: "SwarmUnlock" consumes: - "application/json" produces: - "application/json" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /services: get: summary: "List services" operationId: "ServiceList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Service" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - `id=<service id>` - `label=<service label>` - `mode=["replicated"|"global"]` - `name=<service name>` - name: "status" in: "query" type: "boolean" description: | Include service status, with count of running and desired tasks. tags: ["Service"] /services/create: post: summary: "Create a service" operationId: "ServiceCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: type: "object" title: "ServiceCreateResponse" properties: ID: description: "The ID of the created service." 
type: "string" Warning: description: "Optional warning message" type: "string" example: ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "network is not eligible for services" schema: $ref: "#/definitions/ErrorResponse" 409: description: "name conflicts with an existing service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "web" TaskTemplate: ContainerSpec: Image: "nginx:alpine" Mounts: - ReadOnly: true Source: "web-data" Target: "/usr/share/nginx/html" Type: "volume" VolumeOptions: DriverConfig: {} Labels: com.example.something: "something-value" Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] User: "33" DNSConfig: Nameservers: ["8.8.8.8"] Search: ["example.org"] Options: ["timeout:3"] Secrets: - File: Name: "www.example.org.key" UID: "33" GID: "33" Mode: 384 SecretID: "fpjqlhnwb19zds35k8wn80lq9" SecretName: "example_org_domain_key" LogDriver: Name: "json-file" Options: max-file: "3" max-size: "10M" Placement: {} Resources: Limits: MemoryBytes: 104857600 Reservations: {} RestartPolicy: Condition: "on-failure" Delay: 10000000000 MaxAttempts: 10 Mode: Replicated: Replicas: 4 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Ports: - Protocol: "tcp" PublishedPort: 8080 TargetPort: 80 Labels: foo: "bar" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}: get: summary: "Inspect a service" operationId: "ServiceInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Service" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "insertDefaults" in: "query" description: "Fill empty fields with default values." type: "boolean" default: false tags: ["Service"] delete: summary: "Delete a service" operationId: "ServiceDelete" responses: 200: description: "no error" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." 
required: true type: "string" tags: ["Service"] /services/{id}/update: post: summary: "Update a service" operationId: "ServiceUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/ServiceUpdateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "top" TaskTemplate: ContainerSpec: Image: "busybox" Args: - "top" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" - name: "version" in: "query" description: | The version number of the service object being updated. This is required to avoid conflicting writes. This version number should be the value as currently set on the service *before* the update. You can find the current version by calling `GET /services/{id}` required: true type: "integer" - name: "registryAuthFrom" in: "query" description: | If the `X-Registry-Auth` header is not specified, this parameter indicates where to find registry authorization credentials. type: "string" enum: ["spec", "previous-spec"] default: "spec" - name: "rollback" in: "query" description: | Set to this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case. type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}/logs: get: summary: "Get service logs" description: | Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "ServiceLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such service: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the service" type: "string" - name: "details" in: "query" description: "Show service context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Service"] /tasks: get: summary: "List tasks" operationId: "TaskList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Task" example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" - ID: "1yljwbmlr8er2waf8orvqpwms" Version: Index: 30 CreatedAt: "2016-06-07T21:07:30.019104782Z" UpdatedAt: "2016-06-07T21:07:30.231958098Z" Name: "hopeful_cori" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:30.202183143Z" State: "shutdown" Message: "shutdown" ContainerStatus: ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" DesiredState: "shutdown" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.5/16" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: - `desired-state=(running | shutdown | accepted)` - `id=<task id>` - `label=key` or `label="key=value"` - `name=<task name>` - `node=<node id or name>` - `service=<service name>` tags: ["Task"] /tasks/{id}: get: summary: "Inspect a task" operationId: "TaskInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Task" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID of the task" required: true type: "string" tags: ["Task"] /tasks/{id}/logs: get: summary: "Get task logs" description: | Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such task: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID of the task" type: "string" - name: "details" in: "query" description: "Show task context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Task"] /secrets: get: summary: "List secrets" operationId: "SecretList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Secret" example: - ID: "blt1owaxmitz71s9v5zh81zun" Version: Index: 85 CreatedAt: "2017-07-20T13:55:28.678958722Z" UpdatedAt: "2017-07-20T13:55:28.678958722Z" Spec: Name: "mysql-passwd" Labels: some.label: "some.value" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. 
Available filters: - `id=<secret id>` - `label=<key> or label=<key>=value` - `name=<secret name>` - `names=<secret name>` tags: ["Secret"] /secrets/create: post: summary: "Create a secret" operationId: "SecretCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/SecretSpec" - type: "object" example: Name: "app-key.crt" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" tags: ["Secret"] /secrets/{id}: get: summary: "Inspect a secret" operationId: "SecretInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Secret" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] delete: summary: "Delete a secret" operationId: "SecretDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] /secrets/{id}/update: post: summary: "Update a Secret" operationId: "SecretUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such secret" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the secret" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/SecretSpec" description: | The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values. - name: "version" in: "query" description: | The version number of the secret object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true tags: ["Secret"] /configs: get: summary: "List configs" operationId: "ConfigList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Config" example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "server.conf" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: - `id=<config id>` - `label=<key> or label=<key>=value` - `name=<config name>` - `names=<config name>` tags: ["Config"] /configs/create: post: summary: "Create a config" operationId: "ConfigCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/ConfigSpec" - type: "object" example: Name: "server.conf" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" tags: ["Config"] /configs/{id}: get: summary: "Inspect a config" operationId: "ConfigInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Config" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] delete: summary: "Delete a config" operationId: "ConfigDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] /configs/{id}/update: post: summary: "Update a Config" operationId: "ConfigUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such config" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the config" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/ConfigSpec" description: | The spec of the config to update. 
Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values. - name: "version" in: "query" description: | The version number of the config object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Config"] /distribution/{name}/json: get: summary: "Get image information from the registry" description: | Return image digest and platform information by contacting the registry. operationId: "DistributionInspect" produces: - "application/json" responses: 200: description: "descriptor and platform information" schema: type: "object" x-go-name: DistributionInspect title: "DistributionInspectResponse" required: [Descriptor, Platforms] properties: Descriptor: type: "object" description: | A descriptor struct containing digest, media type, and size. properties: mediaType: type: "string" size: type: "integer" format: "int64" digest: type: "string" urls: type: "array" items: type: "string" Platforms: type: "array" description: | An array containing all platforms supported by the image. items: type: "object" properties: architecture: type: "string" os: type: "string" os.version: type: "string" os.features: type: "array" items: type: "string" variant: type: "string" Features: type: "array" items: type: "string" examples: application/json: Descriptor: MediaType: "application/vnd.docker.distribution.manifest.v2+json" Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" Size: 3987495 URLs: - "" Platforms: - architecture: "amd64" os: "linux" os.version: "" os.features: - "" variant: "" Features: - "" 401: description: "Failed authentication or no image found" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Distribution"] /session: post: summary: "Initialize interactive session" description: | Start a new interactive session with a server. Session allows the server to call back to the client for advanced capabilities. ### Hijacking This endpoint hijacks the HTTP connection to an HTTP2 transport that allows the client to expose gRPC services on that connection. For example, the client sends this request to upgrade the connection: ``` POST /session HTTP/1.1 Upgrade: h2c Connection: Upgrade ``` The Docker daemon responds with a `101 UPGRADED` response followed by the raw stream: ``` HTTP/1.1 101 UPGRADED Connection: Upgrade Upgrade: h2c ``` operationId: "Session" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hijacking successful" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Session"]
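# Illustrative example (not part of the OpenAPI definition above): the DistributionInspect
# endpoint is a plain GET, so it can be exercised directly. The image reference
# "nginx:latest" and the default Unix socket path are assumptions made only for this
# sketch; substitute any image name and whatever socket or TCP address your daemon uses.
#
#     GET /v1.42/distribution/nginx:latest/json HTTP/1.1
#
# or, equivalently, with curl against the local daemon socket:
#
#     curl --unix-socket /var/run/docker.sock \
#          http://localhost/v1.42/distribution/nginx:latest/json
#
# A 200 reply carries the Descriptor and Platforms fields documented above.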
# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. # # This is used for generating API documentation and the types used by the # client/server. See api/README.md for more information. # # Some style notes: # - This file is used by ReDoc, which allows GitHub Flavored Markdown in # descriptions. # - There is no maximum line length, for ease of editing and pretty diffs. # - operationIds are in the format "NounVerb", with a singular noun. swagger: "2.0" schemes: - "http" - "https" produces: - "application/json" - "text/plain" consumes: - "application/json" - "text/plain" basePath: "/v1.42" info: title: "Docker Engine API" version: "1.42" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { "message": "page not found" } ``` # Versioning The API is usually changed in each release, so API calls are versioned to ensure that clients don't break. To lock to a specific version of the API, you prefix the URL with its version, for example, call `/v1.30/info` to use the v1.30 version of the `/info` endpoint. If the API version specified in the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. If you omit the version-prefix, the current version of the API (v1.42) is used. For example, calling `/info` is the same as calling `/v1.42/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer daemons. # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) (JSON) string with the following structure: ``` { "username": "string", "password": "string", "email": "string", "serveraddress": "string" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { "identitytoken": "9cbaf023786cd7..." } ``` # The tags on paths define the menu sections in the ReDoc documentation, so # the usage of tags must make sense for that: # - They should be singular, not plural. # - There should not be too many tags, or the menu becomes unwieldy. 
For # example, it is preferable to add a path to the "System" tag instead of # creating a tag with a single path in it. # - The order of tags in this list defines the order in the menu. tags: # Primary objects - name: "Container" x-displayName: "Containers" description: | Create and manage containers. - name: "Image" x-displayName: "Images" - name: "Network" x-displayName: "Networks" description: | Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/network/) for more information. - name: "Volume" x-displayName: "Volumes" description: | Create and manage persistent storage that can be attached to containers. - name: "Exec" x-displayName: "Exec" description: | Run new commands inside running containers. Refer to the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. # Swarm things - name: "Swarm" x-displayName: "Swarm" description: | Engines can be clustered together in a swarm. Refer to the [swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - name: "Node" x-displayName: "Nodes" description: | Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Service" x-displayName: "Services" description: | Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Task" x-displayName: "Tasks" description: | A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - name: "Secret" x-displayName: "Secrets" description: | Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - name: "Config" x-displayName: "Configs" description: | Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. 
# System things - name: "Plugin" x-displayName: "Plugins" - name: "System" x-displayName: "System" definitions: Port: type: "object" description: "An open port on a container" required: [PrivatePort, Type] properties: IP: type: "string" format: "ip-address" description: "Host IP address that the container's port is mapped to" PrivatePort: type: "integer" format: "uint16" x-nullable: false description: "Port on the container" PublicPort: type: "integer" format: "uint16" description: "Port exposed on the host" Type: type: "string" x-nullable: false enum: ["tcp", "udp", "sctp"] example: PrivatePort: 8080 PublicPort: 80 Type: "tcp" MountPoint: type: "object" description: "A mount point inside a container" properties: Type: type: "string" Name: type: "string" Source: type: "string" Destination: type: "string" Driver: type: "string" Mode: type: "string" RW: type: "boolean" Propagation: type: "string" DeviceMapping: type: "object" description: "A device mapping between the host and container" properties: PathOnHost: type: "string" PathInContainer: type: "string" CgroupPermissions: type: "string" example: PathOnHost: "/dev/deviceName" PathInContainer: "/dev/deviceName" CgroupPermissions: "mrw" DeviceRequest: type: "object" description: "A request for devices to be sent to device drivers" properties: Driver: type: "string" example: "nvidia" Count: type: "integer" example: -1 DeviceIDs: type: "array" items: type: "string" example: - "0" - "1" - "GPU-fef8089b-4820-abfc-e83e-94318197576e" Capabilities: description: | A list of capabilities; an OR list of AND lists of capabilities. type: "array" items: type: "array" items: type: "string" example: # gpu AND nvidia AND compute - ["gpu", "nvidia", "compute"] Options: description: | Driver-specific options, specified as a key/value pairs. These options are passed directly to the driver. type: "object" additionalProperties: type: "string" ThrottleDevice: type: "object" properties: Path: description: "Device path" type: "string" Rate: description: "Rate" type: "integer" format: "int64" minimum: 0 Mount: type: "object" properties: Target: description: "Container path." type: "string" Source: description: "Mount source (e.g. a volume name, a host path)." type: "string" Type: description: | The mount type. Available types: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. type: "string" enum: - "bind" - "volume" - "tmpfs" - "npipe" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" Consistency: description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." type: "string" BindOptions: description: "Optional configuration for the `bind` type." type: "object" properties: Propagation: description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." type: "string" enum: - "private" - "rprivate" - "shared" - "rshared" - "slave" - "rslave" NonRecursive: description: "Disable recursive bind mount." type: "boolean" default: false VolumeOptions: description: "Optional configuration for the `volume` type." 
type: "object" properties: NoCopy: description: "Populate volume with data from the target." type: "boolean" default: false Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" DriverConfig: description: "Map of driver specific options" type: "object" properties: Name: description: "Name of the driver to use to create the volume." type: "string" Options: description: "key/value map of driver specific options." type: "object" additionalProperties: type: "string" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" properties: SizeBytes: description: "The size for the tmpfs mount in bytes." type: "integer" format: "int64" Mode: description: "The permission mode for the tmpfs mount in an integer." type: "integer" RestartPolicy: description: | The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. type: "object" properties: Name: type: "string" description: | - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" - "no" - "always" - "unless-stopped" - "on-failure" MaximumRetryCount: type: "integer" description: | If `on-failure` is used, the number of times to retry before giving up. Resources: description: "A container's resources (cgroups config, ulimits, etc)" type: "object" properties: # Applicable to all platforms CpuShares: description: | An integer value representing this container's relative CPU weight versus other containers. type: "integer" Memory: description: "Memory limit in bytes." type: "integer" format: "int64" default: 0 # Applicable to UNIX platforms CgroupParent: description: | Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. type: "string" BlkioWeight: description: "Block IO weight (relative weight)." type: "integer" minimum: 0 maximum: 1000 BlkioWeightDevice: description: | Block IO weight (relative device weight) in the form: ``` [{"Path": "device_path", "Weight": weight}] ``` type: "array" items: type: "object" properties: Path: type: "string" Weight: type: "integer" minimum: 0 BlkioDeviceReadBps: description: | Limit read rate (bytes per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteBps: description: | Limit write rate (bytes per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceReadIOps: description: | Limit read rate (IO per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteIOps: description: | Limit write rate (IO per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" CpuPeriod: description: "The length of a CPU period in microseconds." 
type: "integer" format: "int64" CpuQuota: description: | Microseconds of CPU time that the container can get in a CPU period. type: "integer" format: "int64" CpuRealtimePeriod: description: | The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpuRealtimeRuntime: description: | The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpusetCpus: description: | CPUs in which to allow execution (e.g., `0-3`, `0,1`). type: "string" example: "0-3" CpusetMems: description: | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. type: "string" Devices: description: "A list of devices to add to the container." type: "array" items: $ref: "#/definitions/DeviceMapping" DeviceCgroupRules: description: "a list of cgroup rules to apply to the container" type: "array" items: type: "string" example: "c 13:* rwm" DeviceRequests: description: | A list of requests for devices to be sent to device drivers. type: "array" items: $ref: "#/definitions/DeviceRequest" KernelMemory: description: | Kernel memory limit in bytes. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "integer" format: "int64" example: 209715200 KernelMemoryTCP: description: "Hard limit for kernel TCP buffer memory (in bytes)." type: "integer" format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" format: "int64" MemorySwap: description: | Total memory limit (memory + swap). Set as `-1` to enable unlimited swap. type: "integer" format: "int64" MemorySwappiness: description: | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. type: "integer" format: "int64" minimum: 0 maximum: 100 NanoCpus: description: "CPU quota in units of 10<sup>-9</sup> CPUs." type: "integer" format: "int64" OomKillDisable: description: "Disable OOM Killer for the container." type: "boolean" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true PidsLimit: description: | Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. type: "integer" format: "int64" x-nullable: true Ulimits: description: | A list of resource limits to set in the container. For example: ``` {"Name": "nofile", "Soft": 1024, "Hard": 2048} ``` type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" # Applicable to Windows CpuCount: description: | The number of usable CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" CpuPercent: description: | The usable percentage of the available CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. 
type: "integer" format: "int64" IOMaximumIOps: description: "Maximum IOps for the container system drive (Windows only)" type: "integer" format: "int64" IOMaximumBandwidth: description: | Maximum IO in bytes per second for the container system drive (Windows only). type: "integer" format: "int64" Limit: description: | An object describing a limit on resources which can be requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 Pids: description: | Limits the maximum number of PIDs in the container. Set `0` for unlimited. type: "integer" format: "int64" default: 0 example: 100 ResourceObject: description: | An object describing the resources which can be advertised by a node and requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 GenericResources: $ref: "#/definitions/GenericResources" GenericResources: description: | User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`). type: "array" items: type: "object" properties: NamedResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "string" DiscreteResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "integer" format: "int64" example: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" HealthConfig: description: "A test to perform to check that the container is healthy." type: "object" properties: Test: description: | The test to perform. Possible values are: - `[]` inherit healthcheck from image or parent image - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell type: "array" items: type: "string" Interval: description: | The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Retries: description: | The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. type: "integer" StartPeriod: description: | Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Health: description: | Health stores information about the container's healthcheck results. 
type: "object" properties: Status: description: | Status is one of `none`, `starting`, `healthy` or `unhealthy` - "none" Indicates there is no healthcheck - "starting" Starting indicates that the container is not yet ready - "healthy" Healthy indicates that the container is running correctly - "unhealthy" Unhealthy indicates that the container has a problem type: "string" enum: - "none" - "starting" - "healthy" - "unhealthy" example: "healthy" FailingStreak: description: "FailingStreak is the number of consecutive failures" type: "integer" example: 0 Log: type: "array" description: | Log contains the last few results (oldest first) items: x-nullable: true $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" properties: Start: description: | Date and time at which this check started in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "date-time" example: "2020-01-04T10:44:24.496525531Z" End: description: | Date and time at which this check ended in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2020-01-04T10:45:21.364524523Z" ExitCode: description: | ExitCode meanings: - `0` healthy - `1` unhealthy - `2` reserved (considered unhealthy) - other values: error running probe type: "integer" example: 0 Output: description: "Output from last check" type: "string" HostConfig: description: "Container configuration that depends on the host we are running on" allOf: - $ref: "#/definitions/Resources" - type: "object" properties: # Applicable to all platforms Binds: type: "array" description: | A list of volume bindings for this container. Each volume binding is a string in one of these forms: - `host-src:container-dest[:options]` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest[:options]` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. `options` is an optional, comma-delimited list of: - `nocopy` disables automatic copying of data from the container path to the volume. The `nocopy` flag only applies to named volumes. - `[ro|rw]` mounts a volume read-only or read-write, respectively. If omitted or set to `rw`, volumes are mounted read-write. - `[z|Z]` applies SELinux labels to allow or deny multiple containers to read and write to the same volume. - `z`: a _shared_ content label is applied to the content. This label indicates that multiple containers can share the volume content, for both reading and writing. - `Z`: a _private unshared_ label is applied to the content. This label indicates that only the current container can use a private volume. Labeling systems such as SELinux require proper labels to be placed on volume content that is mounted into a container. Without a label, the security system can prevent a container's processes from using the content. By default, the labels set by the host operating system are not modified. - `[[r]shared|[r]slave|[r]private]` specifies mount [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). This only applies to bind-mounted volumes, not internal volumes or named volumes. 
Mount propagation requires the source mount point (the location where the source directory is mounted in the host operating system) to have the correct propagation properties. For shared volumes, the source mount point must be set to `shared`. For slave volumes, the mount must be set to either `shared` or `slave`. items: type: "string" ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: type: "string" enum: - "json-file" - "syslog" - "journald" - "gelf" - "fluentd" - "awslogs" - "splunk" - "etwlogs" - "none" Config: type: "object" additionalProperties: type: "string" NetworkMode: type: "string" description: | Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name to which this container should connect to. PortBindings: $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: type: "boolean" description: | Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set. VolumeDriver: type: "string" description: "Driver that this container uses to mount volumes." VolumesFrom: type: "array" description: | A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`. items: type: "string" Mounts: description: | Specification for mounts to be added to the container. type: "array" items: $ref: "#/definitions/Mount" # Applicable to UNIX platforms CapAdd: type: "array" description: | A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'. items: type: "string" CapDrop: type: "array" description: | A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'. items: type: "string" CgroupnsMode: type: "string" enum: - "private" - "host" description: | cgroup namespace mode for the container. Possible values are: - `"private"`: the container runs in its own private cgroup namespace - `"host"`: use the host system's cgroup namespace If not specified, the daemon default is used, which can either be `"private"` or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." items: type: "string" DnsOptions: type: "array" description: "A list of DNS options." items: type: "string" DnsSearch: type: "array" description: "A list of DNS search domains." items: type: "string" ExtraHosts: type: "array" description: | A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. items: type: "string" GroupAdd: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" IpcMode: type: "string" description: | IPC sharing mode for the container. Possible values are: - `"none"`: own private IPC namespace, with /dev/shm not mounted - `"private"`: own private IPC namespace - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - `"container:<name|id>"`: join another (shareable) container's IPC namespace - `"host"`: use the host system's IPC namespace If not specified, daemon default is used, which can either be `"private"` or `"shareable"`, depending on daemon version and configuration. 
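          # Illustrative, non-normative examples of the `Binds` forms described
          # earlier in this HostConfig definition (the host path and the volume
          # name are placeholders, not values defined by this specification):
          #
          #   "Binds": [
          #     "/mnt/data:/data:ro",             <- host-src:container-dest[:options]
          #     "my-volume:/srv/cache:rw,nocopy"  <- volume-name:container-dest[:options]
          #   ]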
Cgroup: type: "string" description: "Cgroup to use for the container." Links: type: "array" description: | A list of links for the container in the form `container_name:alias`. items: type: "string" OomScoreAdj: type: "integer" description: | An integer value containing the score given to the container in order to tune OOM killer preferences. example: 500 PidMode: type: "string" description: | Set the PID (Process) Namespace mode for the container. It can be either: - `"container:<name|id>"`: joins another container's PID namespace - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" description: "Gives the container full access to the host." PublishAllPorts: type: "boolean" description: | Allocates an ephemeral host port for all of a container's exposed ports. Ports are de-allocated when the container stops and allocated when the container starts. The allocated port might be changed when restarting the container. The port is selected from the ephemeral port range that depends on the kernel. For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" description: "A list of string values to customize labels for MLS systems, such as SELinux." items: type: "string" StorageOpt: type: "object" description: | Storage driver options for this container, in the form `{"size": "120G"}`. additionalProperties: type: "string" Tmpfs: type: "object" description: | A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: ``` { "/run": "rw,noexec,nosuid,size=65536k" } ``` additionalProperties: type: "string" UTSMode: type: "string" description: "UTS namespace to use for the container." UsernsMode: type: "string" description: | Sets the usernamespace mode for the container when usernamespace remapping option is enabled. ShmSize: type: "integer" description: | Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. minimum: 0 Sysctls: type: "object" description: | A list of kernel parameters (sysctls) to set in the container. For example: ``` {"net.ipv4.ip_forward": "1"} ``` additionalProperties: type: "string" Runtime: type: "string" description: "Runtime to use with this container." # Applicable to Windows ConsoleSize: type: "array" description: | Initial console size, as an `[height, width]` array. (Windows only) minItems: 2 maxItems: 2 items: type: "integer" minimum: 0 Isolation: type: "string" description: | Isolation technology of the container. (Windows only) enum: - "default" - "process" - "hyperv" MaskedPaths: type: "array" description: | The list of paths to be masked inside the container (this overrides the default set of paths). items: type: "string" ReadonlyPaths: type: "array" description: | The list of paths to be set as read-only inside the container (this overrides the default set of paths). items: type: "string" ContainerConfig: description: "Configuration for a container that is portable between hosts" type: "object" properties: Hostname: description: "The hostname to use for the container, as a valid RFC 1123 hostname." type: "string" Domainname: description: "The domain name to use for the container." type: "string" User: description: "The user that commands are run as inside the container." type: "string" AttachStdin: description: "Whether to attach to `stdin`." 
type: "boolean" default: false AttachStdout: description: "Whether to attach to `stdout`." type: "boolean" default: true AttachStderr: description: "Whether to attach to `stderr`." type: "boolean" default: true ExposedPorts: description: | An object mapping ports to an empty object in the form: `{"<port>/<tcp|udp|sctp>": {}}` type: "object" additionalProperties: type: "object" enum: - {} default: {} Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. type: "boolean" default: false OpenStdin: description: "Open `stdin`" type: "boolean" default: false StdinOnce: description: "Close `stdin` after one attached client disconnects" type: "boolean" default: false Env: description: | A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. type: "array" items: type: "string" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" Image: description: | The name of the image to use when creating the container/ type: "string" Volumes: description: | An object mapping mount point paths inside the container to empty objects. type: "object" additionalProperties: type: "object" enum: - {} default: {} WorkingDir: description: "The working directory for commands to run in." type: "string" Entrypoint: description: | The entry point for the container as a string or an array of strings. If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). type: "array" items: type: "string" NetworkDisabled: description: "Disable networking for the container." type: "boolean" MacAddress: description: "MAC address of the container." type: "string" OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" items: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" default: "SIGTERM" StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" items: type: "string" NetworkingConfig: description: | NetworkingConfig represents the container's networking configuration for each of its interfaces. It is used for the networking configs specified in the `docker create` and `docker network connect` commands. type: "object" properties: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from # /definitions/EndpointSettings, because containers/create currently # does not support attaching to multiple networks, so the example request # would be confusing if it showed that multiple networks can be contained # in the EndpointsConfig. 
# TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: description: Name of the network's bridge (for example, `docker0`). type: "string" example: "docker0" SandboxID: description: SandboxID uniquely represents a container's network stack. type: "string" example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. type: "boolean" example: false LinkLocalIPv6Address: description: IPv6 unicast address using the link-local prefix. type: "string" example: "fe80::42:acff:fe11:1" LinkLocalIPv6PrefixLen: description: Prefix length of the IPv6 unicast address. type: "integer" example: "64" Ports: $ref: "#/definitions/PortMap" SandboxKey: description: SandboxKey identifies the sandbox type: "string" example: "/var/run/docker/netns/8ab54b426c38" # TODO is SecondaryIPAddresses actually used? SecondaryIPAddresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO is SecondaryIPv6Addresses actually used? SecondaryIPv6Addresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO properties below are part of DefaultNetworkSettings, which is # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 EndpointID: description: | EndpointID uniquely represents a service endpoint in a Sandbox. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.1" GlobalIPv6Address: description: | Global IPv6 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. 
This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 64 IPAddress: description: | IPv4 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address for this network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8:2::100" MacAddress: description: | MAC address for the container on the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "02:42:ac:11:00:04" Networks: description: | Information about all networks that the container is connected to. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Address: description: Address represents an IPv4 or IPv6 IP address. type: "object" properties: Addr: description: IP address. type: "string" PrefixLen: description: Mask length of the IP address. type: "integer" PortMap: description: | PortMap describes the mapping of container ports to host ports, using the container's port-number and protocol as key in the format `<port>/<protocol>`, for example, `80/udp`. If a container's port is mapped for multiple protocols, separate entries are added to the mapping table. type: "object" additionalProperties: type: "array" x-nullable: true items: $ref: "#/definitions/PortBinding" example: "443/tcp": - HostIp: "127.0.0.1" HostPort: "4443" "80/tcp": - HostIp: "0.0.0.0" HostPort: "80" - HostIp: "0.0.0.0" HostPort: "8080" "80/udp": - HostIp: "0.0.0.0" HostPort: "80" "53/udp": - HostIp: "0.0.0.0" HostPort: "53" "2377/tcp": null PortBinding: description: | PortBinding represents a binding between a host IP address and a host port. type: "object" properties: HostIp: description: "Host IP address that the container's port is mapped to." type: "string" example: "127.0.0.1" HostPort: description: "Host port number that the container's port is mapped to." type: "string" example: "4443" GraphDriverData: description: "Information about a container's graph driver." 
type: "object" required: [Name, Data] properties: Name: type: "string" x-nullable: false Data: type: "object" x-nullable: false additionalProperties: type: "string" Image: type: "object" required: - Id - Parent - Comment - Created - Container - DockerVersion - Author - Architecture - Os - Size - VirtualSize - GraphDriver - RootFS properties: Id: type: "string" x-nullable: false RepoTags: type: "array" items: type: "string" RepoDigests: type: "array" items: type: "string" Parent: type: "string" x-nullable: false Comment: type: "string" x-nullable: false Created: type: "string" x-nullable: false Container: type: "string" x-nullable: false ContainerConfig: $ref: "#/definitions/ContainerConfig" DockerVersion: type: "string" x-nullable: false Author: type: "string" x-nullable: false Config: $ref: "#/definitions/ContainerConfig" Architecture: type: "string" x-nullable: false Os: type: "string" x-nullable: false OsVersion: type: "string" Size: type: "integer" format: "int64" x-nullable: false VirtualSize: type: "integer" format: "int64" x-nullable: false GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: type: "object" required: [Type] properties: Type: type: "string" x-nullable: false Layers: type: "array" items: type: "string" BaseLayer: type: "string" Metadata: type: "object" properties: LastTagTime: type: "string" format: "dateTime" ImageSummary: type: "object" required: - Id - ParentId - RepoTags - RepoDigests - Created - Size - SharedSize - VirtualSize - Labels - Containers properties: Id: type: "string" x-nullable: false ParentId: type: "string" x-nullable: false RepoTags: type: "array" x-nullable: false items: type: "string" RepoDigests: type: "array" x-nullable: false items: type: "string" Created: type: "integer" x-nullable: false Size: type: "integer" x-nullable: false SharedSize: type: "integer" x-nullable: false VirtualSize: type: "integer" x-nullable: false Labels: type: "object" x-nullable: false additionalProperties: type: "string" Containers: x-nullable: false type: "integer" AuthConfig: type: "object" properties: username: type: "string" password: type: "string" email: type: "string" serveraddress: type: "string" example: username: "hannibal" password: "xxxx" serveraddress: "https://index.docker.io/v1/" ProcessConfig: type: "object" properties: privileged: type: "boolean" user: type: "string" tty: type: "boolean" entrypoint: type: "string" arguments: type: "array" items: type: "string" Volume: type: "object" required: [Name, Driver, Mountpoint, Labels, Scope, Options] properties: Name: type: "string" description: "Name of the volume." x-nullable: false Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false Mountpoint: type: "string" description: "Mount path of the volume on the host." x-nullable: false CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." Status: type: "object" description: | Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature. additionalProperties: type: "object" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" Scope: type: "string" description: | The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. 
default: "local" x-nullable: false enum: ["local", "global"] Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" UsageData: type: "object" x-nullable: true required: [Size, RefCount] description: | Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. properties: Size: type: "integer" default: -1 description: | Amount of disk space used by the volume (in bytes). This information is only available for volumes created with the `"local"` volume driver. For volumes created with other volume drivers, this field is set to `-1` ("not available") x-nullable: false RefCount: type: "integer" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. x-nullable: false example: Name: "tardis" Driver: "custom" Mountpoint: "/var/lib/docker/volumes/tardis" Status: hello: "world" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" CreatedAt: "2016-06-07T20:31:11.853781916Z" Network: type: "object" properties: Name: type: "string" Id: type: "string" Created: type: "string" format: "dateTime" Scope: type: "string" Driver: type: "string" EnableIPv6: type: "boolean" IPAM: $ref: "#/definitions/IPAM" Internal: type: "boolean" Attachable: type: "boolean" Ingress: type: "boolean" Containers: type: "object" additionalProperties: $ref: "#/definitions/NetworkContainer" Options: type: "object" additionalProperties: type: "string" Labels: type: "object" additionalProperties: type: "string" example: Name: "net01" Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" Created: "2016-10-19T04:33:30.360899459Z" Scope: "local" Driver: "bridge" EnableIPv6: false IPAM: Driver: "default" Config: - Subnet: "172.19.0.0/16" Gateway: "172.19.0.1" Options: foo: "bar" Internal: false Attachable: false Ingress: false Containers: 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: Name: "test" EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: "02:42:ac:13:00:02" IPv4Address: "172.19.0.2/16" IPv6Address: "" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" IPAM: type: "object" properties: Driver: description: "Name of the IPAM driver to use." type: "string" default: "default" Config: description: | List of IPAM configuration options, specified as a map: ``` {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} ``` type: "array" items: type: "object" additionalProperties: type: "string" Options: description: "Driver-specific options, specified as a map." 
type: "object" additionalProperties: type: "string" NetworkContainer: type: "object" properties: Name: type: "string" EndpointID: type: "string" MacAddress: type: "string" IPv4Address: type: "string" IPv6Address: type: "string" BuildInfo: type: "object" properties: id: type: "string" stream: type: "string" error: type: "string" errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" aux: $ref: "#/definitions/ImageID" BuildCache: type: "object" properties: ID: type: "string" Parent: type: "string" Type: type: "string" Description: type: "string" InUse: type: "boolean" Shared: type: "boolean" Size: description: | Amount of disk space used by the build cache (in bytes). type: "integer" CreatedAt: description: | Date and time at which the build cache was created in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" LastUsedAt: description: | Date and time at which the build cache was last used in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" x-nullable: true example: "2017-08-09T07:09:37.632105588Z" UsageCount: type: "integer" ImageID: type: "object" description: "Image ID or Digest" properties: ID: type: "string" example: ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" CreateImageInfo: type: "object" properties: id: type: "string" error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" PushImageInfo: type: "object" properties: error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" ErrorDetail: type: "object" properties: code: type: "integer" message: type: "string" ProgressDetail: type: "object" properties: current: type: "integer" total: type: "integer" ErrorResponse: description: "Represents an error." type: "object" required: ["message"] properties: message: description: "The error message." type: "string" x-nullable: false example: message: "Something went wrong." IdResponse: description: "Response to an API call that returns just an Id" type: "object" required: ["Id"] properties: Id: description: "The id of the newly created object." type: "string" x-nullable: false EndpointSettings: description: "Configuration for a network endpoint." type: "object" properties: # Configurations IPAMConfig: $ref: "#/definitions/EndpointIPAMConfig" Links: type: "array" items: type: "string" example: - "container_1" - "container_2" Aliases: type: "array" items: type: "string" example: - "server_x" - "server_y" # Operational data NetworkID: description: | Unique ID of the network. type: "string" example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" EndpointID: description: | Unique ID for the service endpoint in a Sandbox. type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for this network. type: "string" example: "172.17.0.1" IPAddress: description: | IPv4 address. type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address. type: "string" example: "2001:db8:2::100" GlobalIPv6Address: description: | Global IPv6 address. 
type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. type: "integer" format: "int64" example: 64 MacAddress: description: | MAC address for the endpoint on this network. type: "string" example: "02:42:ac:11:00:04" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" x-nullable: true additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" EndpointIPAMConfig: description: | EndpointIPAMConfig represents an endpoint's IPAM configuration. type: "object" x-nullable: true properties: IPv4Address: type: "string" example: "172.20.30.33" IPv6Address: type: "string" example: "2001:db8:abcd::3033" LinkLocalIPs: type: "array" items: type: "string" example: - "169.254.34.68" - "fe80::3468" PluginMount: type: "object" x-nullable: false required: [Name, Description, Settable, Source, Destination, Type, Options] properties: Name: type: "string" x-nullable: false example: "some-mount" Description: type: "string" x-nullable: false example: "This is a mount that's used by the plugin." Settable: type: "array" items: type: "string" Source: type: "string" example: "/var/lib/docker/plugins/" Destination: type: "string" x-nullable: false example: "/mnt/state" Type: type: "string" x-nullable: false example: "bind" Options: type: "array" items: type: "string" example: - "rbind" - "rw" PluginDevice: type: "object" required: [Name, Description, Settable, Path] x-nullable: false properties: Name: type: "string" x-nullable: false Description: type: "string" x-nullable: false Settable: type: "array" items: type: "string" Path: type: "string" example: "/dev/fuse" PluginEnv: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" Description: x-nullable: false type: "string" Settable: type: "array" items: type: "string" Value: type: "string" PluginInterfaceType: type: "object" x-nullable: false required: [Prefix, Capability, Version] properties: Prefix: type: "string" x-nullable: false Capability: type: "string" x-nullable: false Version: type: "string" x-nullable: false PluginPrivilege: description: | Describes a permission the user has to accept upon installing the plugin. type: "object" x-go-name: "PluginPrivilege" properties: Name: type: "string" example: "network" Description: type: "string" Value: type: "array" items: type: "string" example: - "host" Plugin: description: "A plugin for the Engine API" type: "object" required: [Settings, Enabled, Config, Name] properties: Id: type: "string" example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" Name: type: "string" x-nullable: false example: "tiborvass/sample-volume-plugin" Enabled: description: True if the plugin is running. False if the plugin is not running, only installed. type: "boolean" x-nullable: false example: true Settings: description: "Settings that can be modified by users." 
type: "object" x-nullable: false required: [Args, Devices, Env, Mounts] properties: Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: type: "string" example: - "DEBUG=0" Args: type: "array" items: type: "string" Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PluginReference: description: "plugin remote reference used to push/pull the plugin" type: "string" x-nullable: false example: "localhost:5000/tiborvass/sample-volume-plugin:latest" Config: description: "The config of a plugin." type: "object" x-nullable: false required: - Description - Documentation - Interface - Entrypoint - WorkDir - Network - Linux - PidHost - PropagatedMount - IpcHost - Mounts - Env - Args properties: DockerVersion: description: "Docker Version used to create the plugin" type: "string" x-nullable: false example: "17.06.0-ce" Description: type: "string" x-nullable: false example: "A sample volume plugin for Docker" Documentation: type: "string" x-nullable: false example: "https://docs.docker.com/engine/extend/plugins/" Interface: description: "The interface between Docker and the plugin" x-nullable: false type: "object" required: [Types, Socket] properties: Types: type: "array" items: $ref: "#/definitions/PluginInterfaceType" example: - "docker.volumedriver/1.0" Socket: type: "string" x-nullable: false example: "plugins.sock" ProtocolScheme: type: "string" example: "some.protocol/v1.0" description: "Protocol to use for clients connecting to the plugin." enum: - "" - "moby.plugins.http/v1" Entrypoint: type: "array" items: type: "string" example: - "/usr/bin/sample-volume-plugin" - "/data" WorkDir: type: "string" x-nullable: false example: "/bin/" User: type: "object" x-nullable: false properties: UID: type: "integer" format: "uint32" example: 1000 GID: type: "integer" format: "uint32" example: 1000 Network: type: "object" x-nullable: false required: [Type] properties: Type: x-nullable: false type: "string" example: "host" Linux: type: "object" x-nullable: false required: [Capabilities, AllowAllDevices, Devices] properties: Capabilities: type: "array" items: type: "string" example: - "CAP_SYS_ADMIN" - "CAP_SYSLOG" AllowAllDevices: type: "boolean" x-nullable: false example: false Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PropagatedMount: type: "string" x-nullable: false example: "/mnt/volumes" IpcHost: type: "boolean" x-nullable: false example: false PidHost: type: "boolean" x-nullable: false example: false Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: $ref: "#/definitions/PluginEnv" example: - Name: "DEBUG" Description: "If set, prints debug messages" Settable: null Value: "0" Args: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" example: "args" Description: x-nullable: false type: "string" example: "command line arguments" Settable: type: "array" items: type: "string" Value: type: "array" items: type: "string" rootfs: type: "object" properties: type: type: "string" example: "layers" diff_ids: type: "array" items: type: "string" example: - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ObjectVersion: description: | The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. 
The client must send the version number along with the modified specification when updating these objects. This approach ensures safe concurrency and determinism in that the change on the object may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not unintentionally overwrite each other. type: "object" properties: Index: type: "integer" format: "uint64" example: 373531 NodeSpec: type: "object" properties: Name: description: "Name for the node." type: "string" example: "my-node" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Role: description: "Role of the node." type: "string" enum: - "worker" - "manager" example: "manager" Availability: description: "Availability of the node." type: "string" enum: - "active" - "pause" - "drain" example: "active" example: Availability: "active" Name: "node-name" Role: "manager" Labels: foo: "bar" Node: type: "object" properties: ID: type: "string" example: "24ifsmvkjbyhk" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the node was added to the swarm in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the node was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/NodeSpec" Description: $ref: "#/definitions/NodeDescription" Status: $ref: "#/definitions/NodeStatus" ManagerStatus: $ref: "#/definitions/ManagerStatus" NodeDescription: description: | NodeDescription encapsulates the properties of the Node as reported by the agent. type: "object" properties: Hostname: type: "string" example: "bf3067039e47" Platform: $ref: "#/definitions/Platform" Resources: $ref: "#/definitions/ResourceObject" Engine: $ref: "#/definitions/EngineDescription" TLSInfo: $ref: "#/definitions/TLSInfo" Platform: description: | Platform represents the platform (Arch/OS). type: "object" properties: Architecture: description: | Architecture represents the hardware architecture (for example, `x86_64`). type: "string" example: "x86_64" OS: description: | OS represents the Operating System (for example, `linux` or `windows`). type: "string" example: "linux" EngineDescription: description: "EngineDescription provides information about an engine." 
type: "object" properties: EngineVersion: type: "string" example: "17.06.0" Labels: type: "object" additionalProperties: type: "string" example: foo: "bar" Plugins: type: "array" items: type: "object" properties: Type: type: "string" Name: type: "string" example: - Type: "Log" Name: "awslogs" - Type: "Log" Name: "fluentd" - Type: "Log" Name: "gcplogs" - Type: "Log" Name: "gelf" - Type: "Log" Name: "journald" - Type: "Log" Name: "json-file" - Type: "Log" Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" Name: "syslog" - Type: "Network" Name: "bridge" - Type: "Network" Name: "host" - Type: "Network" Name: "ipvlan" - Type: "Network" Name: "macvlan" - Type: "Network" Name: "null" - Type: "Network" Name: "overlay" - Type: "Volume" Name: "local" - Type: "Volume" Name: "localhost:5000/vieux/sshfs:latest" - Type: "Volume" Name: "vieux/sshfs:latest" TLSInfo: description: | Information about the issuer of leaf TLS certificates and the trusted root CA certificate. type: "object" properties: TrustRoot: description: | The root CA certificate(s) that are used to validate leaf TLS certificates. type: "string" CertIssuerSubject: description: The base64-url-safe-encoded raw subject bytes of the issuer. type: "string" CertIssuerPublicKey: description: | The base64-url-safe-encoded raw public key bytes of the issuer. type: "string" example: TrustRoot: | -----BEGIN CERTIFICATE----- MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H -----END CERTIFICATE----- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" NodeStatus: description: | NodeStatus represents the status of a node. It provides the current status of the node, as seen by the manager. type: "object" properties: State: $ref: "#/definitions/NodeState" Message: type: "string" example: "" Addr: description: "IP address of the node." type: "string" example: "172.17.0.2" NodeState: description: "NodeState represents the state of a node." type: "string" enum: - "unknown" - "down" - "ready" - "disconnected" example: "ready" ManagerStatus: description: | ManagerStatus represents the status of a manager. It provides the current status of a node's manager component, if the node is a manager. x-nullable: true type: "object" properties: Leader: type: "boolean" default: false example: true Reachability: $ref: "#/definitions/Reachability" Addr: description: | The IP address and port at which the manager is reachable. type: "string" example: "10.0.0.46:2377" Reachability: description: "Reachability represents the reachability of a node." type: "string" enum: - "unknown" - "unreachable" - "reachable" example: "reachable" SwarmSpec: description: "User modifiable swarm configuration." type: "object" properties: Name: description: "Name of the swarm." type: "string" example: "default" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: com.example.corp.type: "production" com.example.corp.department: "engineering" Orchestration: description: "Orchestration configuration." type: "object" x-nullable: true properties: TaskHistoryRetentionLimit: description: | The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks. type: "integer" format: "int64" example: 10 Raft: description: "Raft configuration." type: "object" properties: SnapshotInterval: description: "The number of log entries between snapshots." type: "integer" format: "uint64" example: 10000 KeepOldSnapshots: description: | The number of snapshots to keep beyond the current snapshot. type: "integer" format: "uint64" LogEntriesForSlowFollowers: description: | The number of log entries to keep around to sync up slow followers after a snapshot is created. type: "integer" format: "uint64" example: 500 ElectionTick: description: | The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 3 HeartbeatTick: description: | The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 1 Dispatcher: description: "Dispatcher configuration." type: "object" x-nullable: true properties: HeartbeatPeriod: description: | The delay for an agent to send a heartbeat to the dispatcher. type: "integer" format: "int64" example: 5000000000 CAConfig: description: "CA configuration." type: "object" x-nullable: true properties: NodeCertExpiry: description: "The duration node certificates are issued for." type: "integer" format: "int64" example: 7776000000000000 ExternalCAs: description: | Configuration for forwarding signing requests to an external certificate authority. type: "array" items: type: "object" properties: Protocol: description: | Protocol for communication with the external CA (currently only `cfssl` is supported). type: "string" enum: - "cfssl" default: "cfssl" URL: description: | URL where certificate signing requests should be sent. type: "string" Options: description: | An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver. type: "object" additionalProperties: type: "string" CACert: description: | The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided). type: "string" SigningCACert: description: | The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format. type: "string" SigningCAKey: description: | The desired signing CA key for all swarm node TLS leaf certificates, in PEM format. type: "string" ForceRotate: description: | An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey` format: "uint64" type: "integer" EncryptionConfig: description: "Parameters related to encryption-at-rest." type: "object" properties: AutoLockManagers: description: | If set, generate a key and use it to lock data stored on the managers. 
type: "boolean" example: false TaskDefaults: description: "Defaults for creating tasks in this cluster." type: "object" properties: LogDriver: description: | The log driver to use for tasks created in the orchestrator if unspecified by a service. Updating this value only affects new tasks. Existing tasks continue to use their previously configured log driver until recreated. type: "object" properties: Name: description: | The log driver to use as a default for new tasks. type: "string" example: "json-file" Options: description: | Driver-specific options for the selectd log driver, specified as key/value pairs. type: "object" additionalProperties: type: "string" example: "max-file": "10" "max-size": "100m" # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but # without `JoinTokens`. ClusterInfo: description: | ClusterInfo represents information about the swarm as is returned by the "/info" endpoint. Join-tokens are not included. x-nullable: true type: "object" properties: ID: description: "The ID of the swarm." type: "string" example: "abajmipo7b4xz5ip2nrla6b11" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the swarm was initialised in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the swarm was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/SwarmSpec" TLSInfo: $ref: "#/definitions/TLSInfo" RootRotationInProgress: description: | Whether there is currently a root CA rotation in progress for the swarm type: "boolean" example: false DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. If no port is set or is set to 0, the default port (4789) is used. type: "integer" format: "uint32" default: 4789 example: 4789 DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" format: "CIDR" example: ["10.10.0.0/16", "20.20.0.0/16"] SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. type: "integer" format: "uint32" maximum: 29 default: 24 example: 24 JoinTokens: description: | JoinTokens contains the tokens workers and managers need to join the swarm. type: "object" properties: Worker: description: | The token workers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" Manager: description: | The token managers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" Swarm: type: "object" allOf: - $ref: "#/definitions/ClusterInfo" - type: "object" properties: JoinTokens: $ref: "#/definitions/JoinTokens" TaskSpec: description: "User modifiable task configuration." type: "object" properties: PluginSpec: type: "object" description: | Plugin spec for the service. *(Experimental release only.)* <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. 
properties: Name: description: "The name or 'alias' to use for the plugin." type: "string" Remote: description: "The plugin image reference to use." type: "string" Disabled: description: "Disable the plugin once scheduled." type: "boolean" PluginPrivilege: type: "array" items: $ref: "#/definitions/PluginPrivilege" ContainerSpec: type: "object" description: | Container spec for the service. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. properties: Image: description: "The image name to use for the container" type: "string" Labels: description: "User-defined key/value data." type: "object" additionalProperties: type: "string" Command: description: "The command to be run in the image." type: "array" items: type: "string" Args: description: "Arguments to the command." type: "array" items: type: "string" Hostname: description: | The hostname to use for the container, as a valid [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. type: "string" Env: description: | A list of environment variables in the form `VAR=value`. type: "array" items: type: "string" Dir: description: "The working directory for commands to run in." type: "string" User: description: "The user inside the container." type: "string" Groups: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" Privileges: type: "object" description: "Security options for the container" properties: CredentialSpec: type: "object" description: "CredentialSpec for managed service account (Windows only)" properties: Config: type: "string" example: "0bt9dmxjvjiqermk6xrop3ekq" description: | Load credential spec from a Swarm Config with the given ID. The specified config must also be present in the Configs field with the Runtime property set. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. File: type: "string" example: "spec.json" description: | Load credential spec from this file. The file is read by the daemon, and must be present in the `CredentialSpecs` subdirectory in the docker data directory, which defaults to `C:\ProgramData\Docker\` on Windows. For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. Registry: type: "string" description: | Load credential spec from this value in the Windows registry. The specified registry value must be located in: `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" properties: Disable: type: "boolean" description: "Disable SELinux" User: type: "string" description: "SELinux user label" Role: type: "string" description: "SELinux role label" Type: type: "string" description: "SELinux type label" Level: type: "string" description: "SELinux level label" TTY: description: "Whether a pseudo-TTY should be allocated." 
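# Illustrative sketch (YAML comment, not part of the API schema): a minimal
# ContainerSpec fragment using only fields defined in this section; the values
# are invented for illustration.
#
#   ContainerSpec:
#     Image: "redis:alpine"
#     Command: ["redis-server"]
#     Args: ["--appendonly", "yes"]
#     Env: ["EXAMPLE_VAR=value"]
#     User: "redis"
#     TTY: false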
type: "boolean" OpenStdin: description: "Open `stdin`" type: "boolean" ReadOnly: description: "Mount the container's root filesystem as read only." type: "boolean" Mounts: description: | Specification for mounts to be added to containers created as part of the service. type: "array" items: $ref: "#/definitions/Mount" StopSignal: description: "Signal to stop the container." type: "string" StopGracePeriod: description: | Amount of time to wait for the container to terminate before forcefully killing it. type: "integer" format: "int64" HealthCheck: $ref: "#/definitions/HealthConfig" Hosts: type: "array" description: | A list of hostname/IP mappings to add to the container's `hosts` file. The format of extra hosts is specified in the [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) man page: IP_address canonical_hostname [aliases...] items: type: "string" DNSConfig: description: | Specification for DNS related configurations in resolver configuration file (`resolv.conf`). type: "object" properties: Nameservers: description: "The IP addresses of the name servers." type: "array" items: type: "string" Search: description: "A search list for host-name lookup." type: "array" items: type: "string" Options: description: | A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.). type: "array" items: type: "string" Secrets: description: | Secrets contains references to zero or more secrets that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" SecretID: description: | SecretID represents the ID of the specific secret that we're referencing. type: "string" SecretName: description: | SecretName is the name of the secret that this references, but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID. type: "string" Configs: description: | Configs contains references to zero or more configs that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" Runtime: description: | Runtime represents a target that is not mounted into the container but is used by the task <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually > exclusive type: "object" ConfigID: description: | ConfigID represents the ID of the specific config that we're referencing. type: "string" ConfigName: description: | ConfigName is the name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID. 
type: "string" Isolation: type: "string" description: | Isolation technology of the containers running the service. (Windows only) enum: - "default" - "process" - "hyperv" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true Sysctls: description: | Set kernel namedspaced parameters (sysctls) in the container. The Sysctls option on services accepts the same sysctls as the are supported on containers. Note that while the same sysctls are supported, no guarantees or checks are made about their suitability for a clustered environment, and it's up to the user to determine whether a given sysctl will work properly in a Service. type: "object" additionalProperties: type: "string" # This option is not used by Windows containers CapabilityAdd: type: "array" description: | A list of kernel capabilities to add to the default set for the container. items: type: "string" example: - "CAP_NET_RAW" - "CAP_SYS_ADMIN" - "CAP_SYS_CHROOT" - "CAP_SYSLOG" CapabilityDrop: type: "array" description: | A list of kernel capabilities to drop from the default set for the container. items: type: "string" example: - "CAP_NET_RAW" Ulimits: description: | A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay networks. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. type: "object" properties: ContainerID: description: "ID of the container represented by this task" type: "string" Resources: description: | Resource requirements which apply to each individual container created as part of the service. type: "object" properties: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" Reservation: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: description: | Specification for the restart policy which applies to containers created as part of this service. type: "object" properties: Condition: description: "Condition for restart." type: "string" enum: - "none" - "on-failure" - "any" Delay: description: "Delay between restart attempts." type: "integer" format: "int64" MaxAttempts: description: | Maximum attempts to restart a given container before giving up (default value is 0, which is ignored). type: "integer" format: "int64" default: 0 Window: description: | Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded). type: "integer" format: "int64" default: 0 Placement: type: "object" properties: Constraints: description: | An array of constraint expressions to limit the set of nodes where a task can be scheduled. Constraint expressions can either use a _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find nodes that satisfy every expression (AND match). 
Constraints can match node or Docker Engine labels as follows: node attribute | matches | example ---------------------|--------------------------------|----------------------------------------------- `node.id` | Node ID | `node.id==2ivku8v2gvtg4` `node.hostname` | Node hostname | `node.hostname!=node-2` `node.role` | Node role (`manager`/`worker`) | `node.role==manager` `node.platform.os` | Node operating system | `node.platform.os==windows` `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` `node.labels` | User-defined node labels | `node.labels.security==high` `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` `engine.labels` apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add `node.labels` for operational purposes by using the [`node update endpoint`](#operation/NodeUpdate). type: "array" items: type: "string" example: - "node.hostname!=node3.corp.example.com" - "node.role!=manager" - "node.labels.type==production" - "node.platform.os==linux" - "node.platform.arch==x86_64" Preferences: description: | Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence. type: "array" items: type: "object" properties: Spread: type: "object" properties: SpreadDescriptor: description: | label descriptor, such as `engine.labels.az`. type: "string" example: - Spread: SpreadDescriptor: "node.labels.datacenter" - Spread: SpreadDescriptor: "node.labels.rack" MaxReplicas: description: | Maximum number of replicas for per node (default value is 0, which is unlimited) type: "integer" format: "int64" default: 0 Platforms: description: | Platforms stores all the platforms that the service's image can run on. This field is used in the platform filter for scheduling. If empty, then the platform filter is off, meaning there are no scheduling restrictions. type: "array" items: $ref: "#/definitions/Platform" ForceUpdate: description: | A counter that triggers an update even if no relevant parameters have been changed. type: "integer" Runtime: description: | Runtime is the type of runtime specified for the task executor. type: "string" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" LogDriver: description: | Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified. type: "object" properties: Name: type: "string" Options: type: "object" additionalProperties: type: "string" TaskState: type: "string" enum: - "new" - "allocated" - "pending" - "assigned" - "accepted" - "preparing" - "ready" - "starting" - "running" - "complete" - "shutdown" - "failed" - "rejected" - "remove" - "orphaned" Task: type: "object" properties: ID: description: "The ID of the task." type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Name: description: "Name of the task." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Spec: $ref: "#/definitions/TaskSpec" ServiceID: description: "The ID of the service this task is part of." type: "string" Slot: type: "integer" NodeID: description: "The ID of the node that this task is on." 
type: "string" AssignedGenericResources: $ref: "#/definitions/GenericResources" Status: type: "object" properties: Timestamp: type: "string" format: "dateTime" State: $ref: "#/definitions/TaskState" Message: type: "string" Err: type: "string" ContainerStatus: type: "object" properties: ContainerID: type: "string" PID: type: "integer" ExitCode: type: "integer" DesiredState: $ref: "#/definitions/TaskState" JobIteration: description: | If the Service this Task belongs to is a job-mode service, contains the JobIteration of the Service this Task was created for. Absent if the Task was created for a Replicated or Global Service. $ref: "#/definitions/ObjectVersion" example: ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" AssignedGenericResources: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" ServiceSpec: description: "User modifiable configuration for a service." properties: Name: description: "Name of the service." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" TaskTemplate: $ref: "#/definitions/TaskSpec" Mode: description: "Scheduling mode for the service." type: "object" properties: Replicated: type: "object" properties: Replicas: type: "integer" format: "int64" Global: type: "object" ReplicatedJob: description: | The mode used for services with a finite number of tasks that run to a completed state. type: "object" properties: MaxConcurrent: description: | The maximum number of replicas to run simultaneously. type: "integer" format: "int64" default: 1 TotalCompletions: description: | The total number of replicas desired to reach the Completed state. If unset, will default to the value of `MaxConcurrent` type: "integer" format: "int64" GlobalJob: description: | The mode used for services which run a task to the completed state on each valid node. type: "object" UpdateConfig: description: "Specification for the update strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: "Amount of time between updates, in nanoseconds." type: "integer" format: "int64" FailureAction: description: | Action to take if an updated task fails to run, or stops running during the update. 
type: "string" enum: - "continue" - "pause" - "rollback" Monitor: description: | Amount of time to monitor each updated task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" RollbackConfig: description: "Specification for the rollback strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: | Amount of time between rollback iterations, in nanoseconds. type: "integer" format: "int64" FailureAction: description: | Action to take if an rolled back task fails to run, or stops running during the rollback. type: "string" enum: - "continue" - "pause" Monitor: description: | Amount of time to monitor each rolled back task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" EndpointSpec: $ref: "#/definitions/EndpointSpec" EndpointPortConfig: type: "object" properties: Name: type: "string" Protocol: type: "string" enum: - "tcp" - "udp" - "sctp" TargetPort: description: "The port inside the container." type: "integer" PublishedPort: description: "The port on the swarm hosts." type: "integer" PublishMode: description: | The mode in which port is published. <p><br /></p> - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publish the port directly on the swarm node where that service is running. type: "string" enum: - "ingress" - "host" default: "ingress" example: "ingress" EndpointSpec: description: "Properties that can be configured to access and load balance a service." type: "object" properties: Mode: description: | The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" - "dnsrr" default: "vip" Ports: description: | List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used. 
type: "array" items: $ref: "#/definitions/EndpointPortConfig" Service: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ServiceSpec" Endpoint: type: "object" properties: Spec: $ref: "#/definitions/EndpointSpec" Ports: type: "array" items: $ref: "#/definitions/EndpointPortConfig" VirtualIPs: type: "array" items: type: "object" properties: NetworkID: type: "string" Addr: type: "string" UpdateStatus: description: "The status of a service update." type: "object" properties: State: type: "string" enum: - "updating" - "paused" - "completed" StartedAt: type: "string" format: "dateTime" CompletedAt: type: "string" format: "dateTime" Message: type: "string" ServiceStatus: description: | The status of the service's tasks. Provided only when requested as part of a ServiceList operation. type: "object" properties: RunningTasks: description: | The number of tasks for the service currently in the Running state. type: "integer" format: "uint64" example: 7 DesiredTasks: description: | The number of tasks for the service desired to be running. For replicated services, this is the replica count from the service spec. For global services, this is computed by taking count of all tasks for the service with a Desired State other than Shutdown. type: "integer" format: "uint64" example: 10 CompletedTasks: description: | The number of tasks for a job that are in the Completed state. This field must be cross-referenced with the service type, as the value of 0 may mean the service is not in a job mode, or it may mean the job-mode service has no tasks yet Completed. type: "integer" format: "uint64" JobStatus: description: | The status of the service when it is in one of ReplicatedJob or GlobalJob modes. Absent on Replicated and Global mode services. The JobIteration is an ObjectVersion, but unlike the Service's version, does not need to be sent with an update request. type: "object" properties: JobIteration: description: | JobIteration is a value increased each time a Job is executed, successfully or otherwise. "Executed", in this case, means the job as a whole has been started, not that an individual Task has been launched. A job is "Executed" when its ServiceSpec is updated. JobIteration can be used to disambiguate Tasks belonging to different executions of a job. Though JobIteration will increase with each subsequent execution, it may not necessarily increase by 1, and so JobIteration should not be used to $ref: "#/definitions/ObjectVersion" LastExecution: description: | The last time, as observed by the server, that this job was started. 
type: "string" format: "dateTime" example: ID: "9mnpnzenvg8p8tdbtq4wvbkcz" Version: Index: 19 CreatedAt: "2016-06-07T21:05:51.880065305Z" UpdatedAt: "2016-06-07T21:07:29.962229872Z" Spec: Name: "hopeful_cori" TaskTemplate: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Endpoint: Spec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 VirtualIPs: - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.2/16" - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.3/16" ImageDeleteResponseItem: type: "object" properties: Untagged: description: "The image ID of an image that was untagged" type: "string" Deleted: description: "The image ID of an image that was deleted" type: "string" ServiceUpdateResponse: type: "object" properties: Warnings: description: "Optional warning messages" type: "array" items: type: "string" example: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: type: "object" properties: Id: description: "The ID of this container" type: "string" x-go-name: "ID" Names: description: "The names that this container has been given" type: "array" items: type: "string" Image: description: "The name of the image used when creating this container" type: "string" ImageID: description: "The ID of the image that this container was created from" type: "string" Command: description: "Command to run when starting the container" type: "string" Created: description: "When the container was created" type: "integer" format: "int64" Ports: description: "The ports exposed by this container" type: "array" items: $ref: "#/definitions/Port" SizeRw: description: "The size of files that have been created or changed by this container" type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container" type: "integer" format: "int64" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" State: description: "The state of this container (e.g. `Exited`)" type: "string" Status: description: "Additional human-readable status of this container (e.g. `Exit 0`)" type: "string" HostConfig: type: "object" properties: NetworkMode: type: "string" NetworkSettings: description: "A summary of the container's network settings" type: "object" properties: Networks: type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Mounts: type: "array" items: $ref: "#/definitions/Mount" Driver: description: "Driver represents a driver (network, logging, secrets)." type: "object" required: [Name] properties: Name: description: "Name of the driver." type: "string" x-nullable: false example: "some-driver" Options: description: "Key/value map of driver-specific options." type: "object" x-nullable: false additionalProperties: type: "string" example: OptionA: "value for driver-specific option A" OptionB: "value for driver-specific option B" SecretSpec: type: "object" properties: Name: description: "User-defined name of the secret." 
type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) data to store as secret. This field is only used to _create_ a secret, and is not returned by other endpoints. type: "string" example: "" Driver: description: | Name of the secrets driver used to fetch the secret's value from an external secret store. $ref: "#/definitions/Driver" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Secret: type: "object" properties: ID: type: "string" example: "blt1owaxmitz71s9v5zh81zun" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" UpdatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" Spec: $ref: "#/definitions/SecretSpec" ConfigSpec: type: "object" properties: Name: description: "User-defined name of the config." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) config data. type: "string" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Config: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ConfigSpec" ContainerState: description: | ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" properties: Status: description: | String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead". type: "string" enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] example: "running" Running: description: | Whether this container is running. Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. Use the `Status` field instead to determine if a container's state is "running". type: "boolean" example: true Paused: description: "Whether this container is paused." type: "boolean" example: false Restarting: description: "Whether this container is restarting." type: "boolean" example: false OOMKilled: description: | Whether this container has been killed because it ran out of memory. type: "boolean" example: false Dead: type: "boolean" example: false Pid: description: "The process ID of this container" type: "integer" example: 1234 ExitCode: description: "The last exit code of this container" type: "integer" example: 0 Error: type: "string" StartedAt: description: "The time when this container was last started." 
type: "string" example: "2020-01-06T09:06:59.461876391Z" FinishedAt: description: "The time when this container last exited." type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: x-nullable: true $ref: "#/definitions/Health" SystemVersion: type: "object" description: | Response of Engine API: GET "/version" properties: Platform: type: "object" required: [Name] properties: Name: type: "string" Components: type: "array" description: | Information about system components items: type: "object" x-go-name: ComponentVersion required: [Name, Version] properties: Name: description: | Name of the component type: "string" example: "Engine" Version: description: | Version of the component type: "string" x-nullable: false example: "19.03.12" Details: description: | Key/value pairs of strings with additional information about the component. These values are intended for informational purposes only, and their content is not defined, and not part of the API specification. These messages can be printed by the client as information to the user. type: "object" x-nullable: true Version: description: "The version of the daemon" type: "string" example: "19.03.12" ApiVersion: description: | The default (and highest) API version that is supported by the daemon type: "string" example: "1.40" MinAPIVersion: description: | The minimum API version that is supported by the daemon type: "string" example: "1.12" GitCommit: description: | The Git commit of the source code that was used to build the daemon type: "string" example: "48a66213fe" GoVersion: description: | The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" example: "go1.13.14" Os: description: | The operating system that the daemon is running on ("linux" or "windows") type: "string" example: "linux" Arch: description: | The architecture that the daemon is running on type: "string" example: "amd64" KernelVersion: description: | The kernel version (`uname -r`) that the daemon is running on. This field is omitted when empty. type: "string" example: "4.19.76-linuxkit" Experimental: description: | Indicates if the daemon is started with experimental features enabled. This field is omitted when empty / false. type: "boolean" example: true BuildTime: description: | The date and time that the daemon was compiled. type: "string" example: "2020-06-22T15:49:27.000000000+00:00" SystemInfo: type: "object" properties: ID: description: | Unique identifier of the daemon. <p><br /></p> > **Note**: The format of the ID itself is not part of the API, and > should not be considered stable. type: "string" example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" Containers: description: "Total number of containers on the host." type: "integer" example: 14 ContainersRunning: description: | Number of containers with status `"running"`. type: "integer" example: 3 ContainersPaused: description: | Number of containers with status `"paused"`. type: "integer" example: 1 ContainersStopped: description: | Number of containers with status `"stopped"`. type: "integer" example: 10 Images: description: | Total number of images on the host. Both _tagged_ and _untagged_ (dangling) images are counted. type: "integer" example: 508 Driver: description: "Name of the storage driver in use." type: "string" example: "overlay2" DriverStatus: description: | Information specific to the storage driver, provided as "label" / "value" pairs. 
This information is provided by the storage driver, and formatted in a way consistent with the output of `docker info` on the command line. <p><br /></p> > **Note**: The information returned in this field, including the > formatting of values and labels, should not be considered stable, > and may change without notice. type: "array" items: type: "array" items: type: "string" example: - ["Backing Filesystem", "extfs"] - ["Supports d_type", "true"] - ["Native Overlay Diff", "true"] DockerRootDir: description: | Root directory of persistent Docker state. Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` on Windows. type: "string" example: "/var/lib/docker" Plugins: $ref: "#/definitions/PluginsInfo" MemoryLimit: description: "Indicates if the host has memory limit support enabled." type: "boolean" example: true SwapLimit: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true KernelMemory: description: | Indicates if the host has kernel memory limit support enabled. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "boolean" example: true CpuCfsPeriod: description: | Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host. type: "boolean" example: true CpuCfsQuota: description: | Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host. type: "boolean" example: true CPUShares: description: | Indicates if CPU Shares limiting is supported by the host. type: "boolean" example: true CPUSet: description: | Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) type: "boolean" example: true PidsLimit: description: "Indicates if the host kernel has PID limit support enabled." type: "boolean" example: true OomKillDisable: description: "Indicates if OOM killer disable is supported on the host." type: "boolean" IPv4Forwarding: description: "Indicates IPv4 forwarding is enabled." type: "boolean" example: true BridgeNfIptables: description: "Indicates if `bridge-nf-call-iptables` is available on the host." type: "boolean" example: true BridgeNfIp6tables: description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." type: "boolean" example: true Debug: description: | Indicates if the daemon is running in debug-mode / with debug-level logging enabled. type: "boolean" example: true NFd: description: | The total number of file Descriptors in use by the daemon process. This information is only returned if debug-mode is enabled. type: "integer" example: 64 NGoroutines: description: | The number of goroutines that currently exist. This information is only returned if debug-mode is enabled. type: "integer" example: 174 SystemTime: description: | Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" example: "2017-08-08T20:28:29.06202363Z" LoggingDriver: description: | The logging driver to use as a default for new containers. type: "string" CgroupDriver: description: | The driver to use for managing cgroups. type: "string" enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" CgroupVersion: description: | The version of the cgroup. type: "string" enum: ["1", "2"] default: "1" example: "1" NEventsListener: description: "Number of event listeners subscribed." 
type: "integer" example: 30 KernelVersion: description: | Kernel version of the host. On Linux, this information obtained from `uname`. On Windows this information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. type: "string" example: "4.9.38-moby" OperatingSystem: description: | Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" or "Windows Server 2016 Datacenter" type: "string" example: "Alpine Linux v3.5" OSVersion: description: | Version of the host's operating system <p><br /></p> > **Note**: The information returned in this field, including its > very existence, and the formatting of values, should not be considered > stable, and may change without notice. type: "string" example: "16.04" OSType: description: | Generic type of the operating system of the host, as returned by the Go runtime (`GOOS`). Currently returned values are "linux" and "windows". A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "linux" Architecture: description: | Hardware architecture of the host, as returned by the Go runtime (`GOARCH`). A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "x86_64" NCPU: description: | The number of logical CPUs usable by the daemon. The number of available CPUs is checked by querying the operating system when the daemon starts. Changes to operating system CPU allocation after the daemon is started are not reflected. type: "integer" example: 4 MemTotal: description: | Total amount of physical memory available on the host, in bytes. type: "integer" format: "int64" example: 2095882240 IndexServerAddress: description: | Address / URL of the index server that is used for image search, and as a default for user authentication for Docker Hub and Docker Cloud. default: "https://index.docker.io/v1/" type: "string" example: "https://index.docker.io/v1/" RegistryConfig: $ref: "#/definitions/RegistryServiceConfig" GenericResources: $ref: "#/definitions/GenericResources" HttpProxy: description: | HTTP-proxy configured for the daemon. This value is obtained from the [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "http://xxxxx:[email protected]:8080" HttpsProxy: description: | HTTPS-proxy configured for the daemon. This value is obtained from the [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "https://xxxxx:[email protected]:4443" NoProxy: description: | Comma-separated list of domain extensions for which no proxy should be used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Containers do not automatically inherit this configuration. 
type: "string" example: "*.local, 169.254/16" Name: description: "Hostname of the host." type: "string" example: "node5.corp.example.com" Labels: description: | User-defined labels (key/value metadata) as set on the daemon. <p><br /></p> > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, > set through the daemon configuration, and _node_ labels, set from a > manager node in the Swarm. Node labels are not included in this > field. Node labels can be retrieved using the `/nodes/(id)` endpoint > on a manager node in the Swarm. type: "array" items: type: "string" example: ["storage=ssd", "production"] ExperimentalBuild: description: | Indicates if experimental features are enabled on the daemon. type: "boolean" example: true ServerVersion: description: | Version string of the daemon. > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) > returns the Swarm version instead of the daemon version, for example > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: description: | URL of the distributed storage backend. The storage backend is used for multihost networking (to store network and endpoint information) and by the node discovery mechanism. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "consul://consul.corp.example.com:8600/some/path" ClusterAdvertise: description: | The network endpoint that the Engine advertises for the purpose of node discovery. ClusterAdvertise is a `host:port` combination on which the daemon is reachable by other hosts. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "node5.corp.example.com:8000" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) runtimes configured on the daemon. Keys hold the "name" used to reference the runtime. The Docker daemon relies on an OCI compliant runtime (invoked via the `containerd` daemon) as its interface to the Linux kernel namespaces, cgroups, and SELinux. The default runtime is `runc`, and automatically configured. Additional runtimes can be configured by the user and will be listed here. type: "object" additionalProperties: $ref: "#/definitions/Runtime" default: runc: path: "runc" example: runc: path: "runc" runc-master: path: "/go/bin/runc" custom: path: "/usr/local/bin/my-oci-runtime" runtimeArgs: ["--debug", "--systemd-cgroup=false"] DefaultRuntime: description: | Name of the default OCI runtime that is used when starting containers. The default can be overridden per-container at create time. type: "string" default: "runc" example: "runc" Swarm: $ref: "#/definitions/SwarmInfo" LiveRestoreEnabled: description: | Indicates if live restore is enabled. If enabled, containers are kept running when the daemon is shutdown or upon daemon start if running containers are detected. type: "boolean" default: false example: false Isolation: description: | Represents the isolation technology to use as a default for containers. The supported values are platform-specific. 
If no isolation value is specified on daemon start, on Windows client, the default is `hyperv`, and on Windows server, the default is `process`. This option is currently not used on other platforms. default: "default" type: "string" enum: - "default" - "hyperv" - "process" InitBinary: description: | Name and, optional, path of the `docker-init` binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "docker-init" ContainerdCommit: $ref: "#/definitions/Commit" RuncCommit: $ref: "#/definitions/Commit" InitCommit: $ref: "#/definitions/Commit" SecurityOptions: description: | List of security features that are enabled on the daemon, such as apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value pairs. type: "array" items: type: "string" example: - "name=apparmor" - "name=seccomp,profile=default" - "name=selinux" - "name=userns" - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. If a commercial license has been applied to the daemon, information such as number of nodes, and expiration are included. type: "string" example: "Community Engine" DefaultAddressPools: description: | List of custom default address pools for local networks, which can be specified in the daemon.json file or dockerd option. Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 10.10.[0-255].0/24 address pools. type: "array" items: type: "object" properties: Base: description: "The network address in CIDR format" type: "string" example: "10.10.0.0/16" Size: description: "The network pool size" type: "integer" example: "24" Warnings: description: | List of warnings / informational messages about missing features, or issues related to the daemon configuration. These messages can be printed by the client as information to the user. type: "array" items: type: "string" example: - "WARNING: No memory limit support" - "WARNING: bridge-nf-call-iptables is disabled" - "WARNING: bridge-nf-call-ip6tables is disabled" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: description: | Available plugins per type. <p><br /></p> > **Note**: Only unmanaged (V1) plugins are included in this list. > V1 plugins are "lazily" loaded, and are not returned in this list > if there is no resource using the plugin. type: "object" properties: Volume: description: "Names of available volume-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["local"] Network: description: "Names of available network-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] Authorization: description: "Names of available authorization plugins." type: "array" items: type: "string" example: ["img-authz-plugin", "hbm"] Log: description: "Names of available logging-drivers, and logging-driver plugins." type: "array" items: type: "string" example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] RegistryServiceConfig: description: | RegistryServiceConfig stores daemon registry services configuration. 
type: "object" x-nullable: true properties: AllowNondistributableArtifactsCIDRs: description: | List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior, and enables the daemon to push nondistributable artifacts to all registries whose resolved IP address is within the subnet described by the CIDR syntax. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior for the specified registries. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. By default, local registries (`127.0.0.0/8`) are configured as insecure. All other registries are secure. Communicating with an insecure registry is not possible if the daemon assumes that registry is secure. This configuration override this behavior, insecure communication with registries whose resolved IP address is within the subnet described by the CIDR syntax. Registries can also be marked insecure by hostname. Those registries are listed under `IndexConfigs` and have their `Secure` field set to `false`. > **Warning**: Using this option can be useful when running a local > registry, but introduces security vulnerabilities. This option > should therefore ONLY be used for testing purposes. For increased > security, users should add their CA to their system's list of trusted > CAs instead of enabling this option. 
type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] IndexConfigs: type: "object" additionalProperties: $ref: "#/definitions/IndexInfo" example: "127.0.0.1:5000": "Name": "127.0.0.1:5000" "Mirrors": [] "Secure": false "Official": false "[2001:db8:a0b:12f0::1]:80": "Name": "[2001:db8:a0b:12f0::1]:80" "Mirrors": [] "Secure": false "Official": false "docker.io": Name: "docker.io" Mirrors: ["https://hub-mirror.corp.example.com:5000/"] Secure: true Official: true "registry.internal.corp.example.com:3000": Name: "registry.internal.corp.example.com:3000" Mirrors: [] Secure: false Official: false Mirrors: description: | List of registry URLs that act as a mirror for the official (`docker.io`) registry. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://[2001:db8:a0b:12f0::1]/" IndexInfo: description: IndexInfo contains information about a registry. type: "object" x-nullable: true properties: Name: description: | Name of the registry, such as "docker.io". type: "string" example: "docker.io" Mirrors: description: | List of mirrors, expressed as URIs. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://registry-2.docker.io/" - "https://registry-3.docker.io/" Secure: description: | Indicates if the registry is part of the list of insecure registries. If `false`, the registry is insecure. Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. > **Warning**: Insecure registries can be useful when running a local > registry. However, because its use creates security vulnerabilities > it should ONLY be enabled for testing purposes. For increased > security, users should add their CA to their system's list of > trusted CAs instead of enabling this option. type: "boolean" example: true Official: description: | Indicates whether this is an official registry (i.e., Docker Hub / docker.io) type: "boolean" example: true Runtime: description: | Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) runtime. The runtime is invoked by the daemon via the `containerd` daemon. OCI runtimes act as an interface to the Linux kernel namespaces, cgroups, and SELinux. type: "object" properties: path: description: | Name and, optional, path, of the OCI executable binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "/usr/local/bin/my-oci-runtime" runtimeArgs: description: | List of command-line arguments to pass to the runtime when invoked. type: "array" x-nullable: true items: type: "string" example: ["--debug", "--systemd-cgroup=false"] Commit: description: | Commit holds the Git-commit (SHA1) that a binary was built from, as reported in the version-string of external tools, such as `containerd`, or `runC`. type: "object" properties: ID: description: "Actual commit ID of external tool." type: "string" example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" Expected: description: | Commit ID of external tool expected by dockerd as set at build time. type: "string" example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" SwarmInfo: description: | Represents generic information about swarm. type: "object" properties: NodeID: description: "Unique identifier of for this node in the swarm." 
type: "string" default: "" example: "k67qz4598weg5unwwffg6z1m1" NodeAddr: description: | IP address at which this node can be reached by other nodes in the swarm. type: "string" default: "" example: "10.0.0.46" LocalNodeState: $ref: "#/definitions/LocalNodeState" ControlAvailable: type: "boolean" default: false example: true Error: type: "string" default: "" RemoteManagers: description: | List of ID's and addresses of other managers in the swarm. type: "array" default: null x-nullable: true items: $ref: "#/definitions/PeerNode" example: - NodeID: "71izy0goik036k48jg985xnds" Addr: "10.0.0.158:2377" - NodeID: "79y6h1o4gv8n120drcprv5nmc" Addr: "10.0.0.159:2377" - NodeID: "k67qz4598weg5unwwffg6z1m1" Addr: "10.0.0.46:2377" Nodes: description: "Total number of nodes in the swarm." type: "integer" x-nullable: true example: 4 Managers: description: "Total number of managers in the swarm." type: "integer" x-nullable: true example: 3 Cluster: $ref: "#/definitions/ClusterInfo" LocalNodeState: description: "Current local status of this node." type: "string" default: "" enum: - "" - "inactive" - "pending" - "active" - "error" - "locked" example: "active" PeerNode: description: "Represents a peer-node in the swarm" properties: NodeID: description: "Unique identifier of for this node in the swarm." type: "string" Addr: description: | IP address and ports at which this node can be reached. type: "string" NetworkAttachmentConfig: description: | Specifies how a service should be attached to a particular network. type: "object" properties: Target: description: | The target network for attachment. Must be a network name or ID. type: "string" Aliases: description: | Discoverable alternate names for the service on this network. type: "array" items: type: "string" DriverOpts: description: | Driver attachment options for the network target. type: "object" additionalProperties: type: "string" EventActor: description: | Actor describes something that generates events, like a container, network, or a volume. type: "object" properties: ID: description: "The ID of the object emitting the event" type: "string" example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" Attributes: description: | Various key/value attributes of the object, depending on its type. type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-label-value" image: "alpine:latest" name: "my-container" EventMessage: description: | EventMessage represents the information an event contains. type: "object" title: "SystemEventsResponse" properties: Type: description: "The type of object emitting the event" type: "string" enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] example: "container" Action: description: "The type of event" type: "string" example: "create" Actor: $ref: "#/definitions/EventActor" scope: description: | Scope of the event. Engine events are `local` scope. Cluster (Swarm) events are `swarm` scope. type: "string" enum: ["local", "swarm"] time: description: "Timestamp of event" type: "integer" format: "int64" example: 1629574695 timeNano: description: "Timestamp of event, with nanosecond accuracy" type: "integer" format: "int64" example: 1629574695515050031 OCIDescriptor: type: "object" x-go-name: Descriptor description: | A descriptor struct containing digest, media type, and size, as defined in the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). 
properties: mediaType: description: | The media type of the object this schema refers to. type: "string" example: "application/vnd.docker.distribution.manifest.v2+json" digest: description: | The digest of the targeted content. type: "string" example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" size: description: | The size in bytes of the blob. type: "integer" format: "int64" example: 3987495 # TODO Not yet including these fields for now, as they are nil / omitted in our response. # urls: # description: | # List of URLs from which this object MAY be downloaded. # type: "array" # items: # type: "string" # format: "uri" # annotations: # description: | # Arbitrary metadata relating to the targeted content. # type: "object" # additionalProperties: # type: "string" # platform: # $ref: "#/definitions/OCIPlatform" OCIPlatform: type: "object" x-go-name: Platform description: | Describes the platform which the image in the manifest runs on, as defined in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). properties: architecture: description: | The CPU architecture, for example `amd64` or `ppc64`. type: "string" example: "arm" os: description: | The operating system, for example `linux` or `windows`. type: "string" example: "windows" os.version: description: | Optional field specifying the operating system version, for example on Windows `10.0.19041.1165`. type: "string" example: "10.0.19041.1165" os.features: description: | Optional field specifying an array of strings, each listing a required OS feature (for example on Windows `win32k`). type: "array" items: type: "string" example: - "win32k" variant: description: | Optional field specifying a variant of the CPU, for example `v7` to specify ARMv7 when architecture is `arm`. type: "string" example: "v7" DistributionInspect: type: "object" x-go-name: DistributionInspect title: "DistributionInspectResponse" required: [Descriptor, Platforms] description: | Describes the result obtained from contacting the registry to retrieve image metadata. properties: Descriptor: $ref: "#/definitions/OCIDescriptor" Platforms: type: "array" description: | An array containing all platforms supported by the image. items: $ref: "#/definitions/OCIPlatform" paths: /containers/json: get: summary: "List containers" description: | Returns a list of containers. For details on the format, see the [inspect endpoint](#operation/ContainerInspect). Note that it uses a different, smaller representation of a container than inspecting a single container. For example, the list of linked containers is not propagated . operationId: "ContainerList" produces: - "application/json" parameters: - name: "all" in: "query" description: | Return all containers. By default, only running containers are shown. type: "boolean" default: false - name: "limit" in: "query" description: | Return this number of most recently created containers, including non-running ones. type: "integer" - name: "size" in: "query" description: | Return the size of container as fields `SizeRw` and `SizeRootFs`. type: "boolean" default: false - name: "filters" in: "query" description: | Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
Available filters: - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) - `before`=(`<container id>` or `<container name>`) - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `exited=<int>` containers with exit code of `<int>` - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - `id=<ID>` a container's ID - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - `is-task=`(`true`|`false`) - `label=key` or `label="key=value"` of a container label - `name=<name>` a container's name - `network`=(`<network id>` or `<network name>`) - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `since`=(`<container id>` or `<container name>`) - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - `volume`=(`<volume name>` or `<mount point destination>`) type: "string" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/ContainerSummary" examples: application/json: - Id: "8dfafdbc3a40" Names: - "/boring_feynman" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 1" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: - PrivatePort: 2222 PublicPort: 3333 Type: "tcp" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:02" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" - Id: "9cd87474be90" Names: - "/coolName" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 222222" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" Gateway: "172.17.0.1" IPAddress: "172.17.0.8" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:08" Mounts: [] - Id: "3176a2479c92" Names: - "/sleepy_dog" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 3333333333333333" Created: 1367854154 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" Gateway: "172.17.0.1" IPAddress: "172.17.0.6" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:06" Mounts: [] - Id: "4cb07b47f9fb" Names: - "/running_cat" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 444444444444444444444444444444444" Created: 1367854152 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" 
NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" Gateway: "172.17.0.1" IPAddress: "172.17.0.5" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:05" Mounts: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/create: post: summary: "Create a container" operationId: "ContainerCreate" consumes: - "application/json" - "application/octet-stream" produces: - "application/json" parameters: - name: "name" in: "query" description: | Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. type: "string" pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" schema: allOf: - $ref: "#/definitions/ContainerConfig" - type: "object" properties: HostConfig: $ref: "#/definitions/HostConfig" NetworkingConfig: $ref: "#/definitions/NetworkingConfig" example: Hostname: "" Domainname: "" User: "" AttachStdin: false AttachStdout: true AttachStderr: true Tty: false OpenStdin: false StdinOnce: false Env: - "FOO=bar" - "BAZ=quux" Cmd: - "date" Entrypoint: "" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" Volumes: /volumes/data: {} WorkingDir: "" NetworkDisabled: false MacAddress: "12:34:56:78:9a:bc" ExposedPorts: 22/tcp: {} StopSignal: "SIGTERM" StopTimeout: 10 HostConfig: Binds: - "/tmp:/tmp" Links: - "redis3:redis" Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpuQuota: 50000 CpusetCpus: "0,1" CpusetMems: "0,1" MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 300 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceWriteIOps: - {} DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" MemorySwappiness: 60 OomKillDisable: false OomScoreAdj: 500 PidMode: "" PidsLimit: 0 PortBindings: 22/tcp: - HostPort: "11022" PublishAllPorts: false Privileged: false ReadonlyRootfs: false Dns: - "8.8.8.8" DnsOptions: - "" DnsSearch: - "" VolumesFrom: - "parent" - "other:ro" CapAdd: - "NET_ADMIN" CapDrop: - "MKNOD" GroupAdd: - "newgroup" RestartPolicy: Name: "" MaximumRetryCount: 0 AutoRemove: true NetworkMode: "bridge" Devices: [] Ulimits: - {} LogConfig: Type: "json-file" Config: {} SecurityOpt: [] StorageOpt: {} CgroupParent: "" VolumeDriver: "" ShmSize: 67108864 NetworkingConfig: EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" required: true responses: 201: description: "Container created successfully" schema: type: "object" title: "ContainerCreateResponse" description: "OK response to ContainerCreate operation" required: [Id, Warnings] properties: Id: description: "The ID of the created container" type: "string" x-nullable: false Warnings: description: "Warnings encountered when creating the container" type: "array" x-nullable: false items: type: "string" 
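# Illustrative only, not part of the API schema: a minimal Go sketch of calling the
# ContainerCreate endpoint over the local daemon socket. The socket path, the API
# version prefix (v1.41), and the request body are assumptions made for the example.
#
#   package main
#
#   import (
#       "bytes"
#       "context"
#       "fmt"
#       "net"
#       "net/http"
#   )
#
#   func main() {
#       // Client wired to the default local daemon socket (an assumption of this sketch).
#       client := &http.Client{Transport: &http.Transport{
#           DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
#               return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
#           },
#       }}
#       body := []byte(`{"Image": "ubuntu", "Cmd": ["date"]}`)
#       resp, err := client.Post("http://localhost/v1.41/containers/create?name=example",
#           "application/json", bytes.NewReader(body))
#       if err != nil {
#           panic(err)
#       }
#       defer resp.Body.Close()
#       // A 201 response carries {"Id": "...", "Warnings": [...]} as shown in the example below.
#       fmt.Println(resp.Status)
#   }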
examples: application/json: Id: "e90e34656806" Warnings: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/{id}/json: get: summary: "Inspect a container" description: "Return low-level information about a container." operationId: "ContainerInspect" produces: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "ContainerInspectResponse" properties: Id: description: "The ID of the container" type: "string" Created: description: "The time the container was created" type: "string" Path: description: "The path to the command being run" type: "string" Args: description: "The arguments to the command being run" type: "array" items: type: "string" State: x-nullable: true $ref: "#/definitions/ContainerState" Image: description: "The container's image ID" type: "string" ResolvConfPath: type: "string" HostnamePath: type: "string" HostsPath: type: "string" LogPath: type: "string" Name: type: "string" RestartCount: type: "integer" Driver: type: "string" Platform: type: "string" MountLabel: type: "string" ProcessLabel: type: "string" AppArmorProfile: type: "string" ExecIDs: description: "IDs of exec instances that are running in the container." type: "array" items: type: "string" x-nullable: true HostConfig: $ref: "#/definitions/HostConfig" GraphDriver: $ref: "#/definitions/GraphDriverData" SizeRw: description: | The size of files that have been created or changed by this container. type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container." 
type: "integer" format: "int64" Mounts: type: "array" items: $ref: "#/definitions/MountPoint" Config: $ref: "#/definitions/ContainerConfig" NetworkSettings: $ref: "#/definitions/NetworkSettings" examples: application/json: AppArmorProfile: "" Args: - "-c" - "exit 9" Config: AttachStderr: true AttachStdin: false AttachStdout: true Cmd: - "/bin/sh" - "-c" - "exit 9" Domainname: "" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Healthcheck: Test: ["CMD-SHELL", "exit 0"] Hostname: "ba033ac44011" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" MacAddress: "" NetworkDisabled: false OpenStdin: false StdinOnce: false Tty: false User: "" Volumes: /volumes/data: {} WorkingDir: "" StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" Driver: "devicemapper" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 0 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteIOps: - {} ContainerIDFile: "" CpusetCpus: "" CpusetMems: "" CpuPercent: 80 CpuShares: 0 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 Devices: [] DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" IpcMode: "" LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" PidMode: "" PortBindings: {} Privileged: false ReadonlyRootfs: false PublishAllPorts: false RestartPolicy: MaximumRetryCount: 2 Name: "on-failure" LogConfig: Type: "json-file" Sysctls: net.ipv4.ip_forward: "1" Ulimits: - {} VolumeDriver: "" ShmSize: 67108864 HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" MountLabel: "" Name: "/boring_euclid" NetworkSettings: Bridge: "" SandboxID: "" HairpinMode: false LinkLocalIPv6Address: "" LinkLocalIPv6PrefixLen: 0 SandboxKey: "" EndpointID: "" Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 IPAddress: "" IPPrefixLen: 0 IPv6Gateway: "" MacAddress: "" Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Path: "/bin/sh" ProcessLabel: "" ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" RestartCount: 1 State: Error: "" ExitCode: 9 FinishedAt: "2015-01-06T15:47:32.080254511Z" Health: Status: "healthy" FailingStreak: 0 Log: - Start: "2019-12-22T10:59:05.6385933Z" End: "2019-12-22T10:59:05.8078452Z" ExitCode: 0 Output: "" OOMKilled: 
false Dead: false Paused: false Pid: 0 Restarting: false Running: true StartedAt: "2015-01-06T15:47:32.072697474Z" Status: "running" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "size" in: "query" type: "boolean" default: false description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" tags: ["Container"] /containers/{id}/top: get: summary: "List processes running inside a container" description: | On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows. operationId: "ContainerTop" responses: 200: description: "no error" schema: type: "object" title: "ContainerTopResponse" description: "OK response to ContainerTop operation" properties: Titles: description: "The ps column titles" type: "array" items: type: "string" Processes: description: | Each process running in the container, where each is process is an array of values corresponding to the titles. type: "array" items: type: "array" items: type: "string" examples: application/json: Titles: - "UID" - "PID" - "PPID" - "C" - "STIME" - "TTY" - "TIME" - "CMD" Processes: - - "root" - "13642" - "882" - "0" - "17:03" - "pts/0" - "00:00:00" - "/bin/bash" - - "root" - "13735" - "13642" - "0" - "17:06" - "pts/0" - "00:00:00" - "sleep 10" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "ps_args" in: "query" description: "The arguments to pass to `ps`. For example, `aux`" type: "string" default: "-ef" tags: ["Container"] /containers/{id}/logs: get: summary: "Get container logs" description: | Get `stdout` and `stderr` logs from a container. Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. operationId: "ContainerLogs" responses: 200: description: | logs returned as a stream in response body. For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not set Content-Type. schema: type: "string" format: "binary" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "follow" in: "query" description: "Keep connection after returning logs." 
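# Illustrative only: a hedged Go sketch of tailing container logs with this endpoint. It
# assumes an *http.Client wired to the daemon socket (as in the create sketch further above);
# for non-TTY containers the returned body uses the multiplexed stream format documented for
# the attach endpoint.
#
#   package apidocs
#
#   import (
#       "fmt"
#       "io"
#       "net/http"
#   )
#
#   // tailLogs returns the last n lines of stdout and stderr for a container.
#   func tailLogs(client *http.Client, id string, n int) (string, error) {
#       url := fmt.Sprintf("http://localhost/v1.41/containers/%s/logs?stdout=1&stderr=1&tail=%d", id, n)
#       resp, err := client.Get(url)
#       if err != nil {
#           return "", err
#       }
#       defer resp.Body.Close()
#       raw, err := io.ReadAll(resp.Body)
#       return string(raw), err
#   }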
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "until" in: "query" description: "Only return logs before this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Container"] /containers/{id}/changes: get: summary: "Get changes on a container’s filesystem" description: | Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: - `0`: Modified - `1`: Added - `2`: Deleted operationId: "ContainerChanges" produces: ["application/json"] responses: 200: description: "The list of changes" schema: type: "array" items: type: "object" x-go-name: "ContainerChangeResponseItem" title: "ContainerChangeResponseItem" description: "change item in response to ContainerChanges operation" required: [Path, Kind] properties: Path: description: "Path to file that has changed" type: "string" x-nullable: false Kind: description: "Kind of change" type: "integer" format: "uint8" enum: [0, 1, 2] x-nullable: false examples: application/json: - Path: "/dev" Kind: 0 - Path: "/dev/kmsg" Kind: 1 - Path: "/test" Kind: 1 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/export: get: summary: "Export a container" description: "Export the contents of a container as a tarball." operationId: "ContainerExport" produces: - "application/octet-stream" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/stats: get: summary: "Get container stats based on resource usage" description: | This endpoint returns a live stream of a container’s resource usage statistics. The `precpu_stats` is the CPU statistic of the *previous* read, and is used to calculate the CPU usage percentage. It is not an exact copy of the `cpu_stats` field. If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. On a cgroup v2 host, the following fields are not set * `blkio_stats`: all fields other than `io_service_bytes_recursive` * `cpu_stats`: `cpu_usage.percpu_usage` * `memory_stats`: `max_usage` and `failcnt` Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: * used_memory = `memory_stats.usage - memory_stats.stats.cache` * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] responses: 200: description: "no error" schema: type: "object" examples: application/json: read: "2015-01-08T22:57:31.547920715Z" pids_stats: current: 3 networks: eth0: rx_bytes: 5338 rx_dropped: 0 rx_errors: 0 rx_packets: 36 tx_bytes: 648 tx_dropped: 0 tx_errors: 0 tx_packets: 8 eth5: rx_bytes: 4641 rx_dropped: 0 rx_errors: 0 rx_packets: 26 tx_bytes: 690 tx_dropped: 0 tx_errors: 0 tx_packets: 9 memory_stats: stats: total_pgmajfault: 0 cache: 0 mapped_file: 0 total_inactive_file: 0 pgpgout: 414 rss: 6537216 total_mapped_file: 0 writeback: 0 unevictable: 0 pgpgin: 477 total_unevictable: 0 pgmajfault: 0 total_rss: 6537216 total_rss_huge: 6291456 total_writeback: 0 total_inactive_anon: 0 rss_huge: 6291456 hierarchical_memory_limit: 67108864 total_pgfault: 964 total_active_file: 0 active_anon: 6537216 total_active_anon: 6537216 total_pgpgout: 414 total_cache: 0 inactive_anon: 0 active_file: 0 pgfault: 964 inactive_file: 0 total_pgpgin: 477 max_usage: 6651904 usage: 6537216 failcnt: 0 limit: 67108864 blkio_stats: {} cpu_stats: cpu_usage: percpu_usage: - 8646879 - 24472255 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100215355 usage_in_kernelmode: 30000000 system_cpu_usage: 739306590000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 precpu_stats: cpu_usage: percpu_usage: - 8646879 - 24350896 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100093996 usage_in_kernelmode: 30000000 system_cpu_usage: 9492140000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "stream" in: "query" description: | Stream the output. If false, the stats will be output once and then it will disconnect. type: "boolean" default: true - name: "one-shot" in: "query" description: | Only get a single stat instead of waiting for 2 cycles. Must be used with `stream=false`. type: "boolean" default: false tags: ["Container"] /containers/{id}/resize: post: summary: "Resize a container TTY" description: "Resize the TTY for a container." 
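# Illustrative only: a Go sketch of the CPU and memory formulas documented for the stats
# endpoint above. Field names follow the example JSON response; treat this as a sketch of
# the arithmetic, not a complete client.
#
#   package apidocs
#
#   type cpuUsage struct {
#       TotalUsage  uint64   `json:"total_usage"`
#       PercpuUsage []uint64 `json:"percpu_usage"`
#   }
#
#   type cpuStats struct {
#       CPUUsage       cpuUsage `json:"cpu_usage"`
#       SystemCPUUsage uint64   `json:"system_cpu_usage"`
#       OnlineCPUs     uint64   `json:"online_cpus"`
#   }
#
#   type memoryStats struct {
#       Usage uint64            `json:"usage"`
#       Limit uint64            `json:"limit"`
#       Stats map[string]uint64 `json:"stats"`
#   }
#
#   type statsJSON struct {
#       CPUStats    cpuStats    `json:"cpu_stats"`
#       PreCPUStats cpuStats    `json:"precpu_stats"`
#       MemoryStats memoryStats `json:"memory_stats"`
#   }
#
#   // cpuPercent applies the documented formula, falling back to the length of
#   // percpu_usage when online_cpus is not reported (older daemons).
#   func cpuPercent(s statsJSON) float64 {
#       cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
#       systemDelta := float64(s.CPUStats.SystemCPUUsage) - float64(s.PreCPUStats.SystemCPUUsage)
#       numCPUs := float64(s.CPUStats.OnlineCPUs)
#       if numCPUs == 0 {
#           numCPUs = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
#       }
#       if systemDelta <= 0 || cpuDelta < 0 {
#           return 0
#       }
#       return cpuDelta / systemDelta * numCPUs * 100.0
#   }
#
#   // memoryPercent applies used_memory = usage - stats.cache (cgroup v1 hosts).
#   func memoryPercent(s statsJSON) float64 {
#       if s.MemoryStats.Limit == 0 {
#           return 0
#       }
#       used := float64(s.MemoryStats.Usage) - float64(s.MemoryStats.Stats["cache"])
#       return used / float64(s.MemoryStats.Limit) * 100.0
#   }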
operationId: "ContainerResize" consumes: - "application/octet-stream" produces: - "text/plain" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "cannot resize container" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] /containers/{id}/start: post: summary: "Start a container" operationId: "ContainerStart" responses: 204: description: "no error" 304: description: "container already started" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" tags: ["Container"] /containers/{id}/stop: post: summary: "Stop a container" operationId: "ContainerStop" responses: 204: description: "no error" 304: description: "container already stopped" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/restart: post: summary: "Restart a container" operationId: "ContainerRestart" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/kill: post: summary: "Kill a container" description: | Send a POSIX signal to a container, defaulting to killing to the container. 
operationId: "ContainerKill" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is not running" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "signal" in: "query" description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" type: "string" default: "SIGKILL" tags: ["Container"] /containers/{id}/update: post: summary: "Update a container" description: | Change various configuration options of a container without having to recreate it. operationId: "ContainerUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "The container has been updated." schema: type: "object" title: "ContainerUpdateResponse" description: "OK response to ContainerUpdate operation" properties: Warnings: type: "array" items: type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "update" in: "body" required: true schema: allOf: - $ref: "#/definitions/Resources" - type: "object" properties: RestartPolicy: $ref: "#/definitions/RestartPolicy" example: BlkioWeight: 300 CpuShares: 512 CpuPeriod: 100000 CpuQuota: 50000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpusetCpus: "0,1" CpusetMems: "0" Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" tags: ["Container"] /containers/{id}/rename: post: summary: "Rename a container" operationId: "ContainerRename" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "name already in use" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "name" in: "query" required: true description: "New name for the container" type: "string" tags: ["Container"] /containers/{id}/pause: post: summary: "Pause a container" description: | Use the freezer cgroup to suspend all processes in a container. Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
operationId: "ContainerPause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/unpause: post: summary: "Unpause a container" description: "Resume a container which has been paused." operationId: "ContainerUnpause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/attach: post: summary: "Attach to a container" description: | Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. ### Hijacking This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. This is the response from the daemon for an attach request: ``` HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream [STREAM] ``` After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. For example, the client sends this request to upgrade the connection: ``` POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 Upgrade: tcp Connection: Upgrade ``` The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp [STREAM] ``` ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: ```go header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} ``` `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. The simplest way to implement this protocol is the following: 1. Read 8 bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. 
### Stream format when using a TTY When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" - name: "logs" in: "query" description: | Replay previous logs from the container. This is useful for attaching to a container that has started and you want to output everything since the container started. If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. type: "boolean" default: false - name: "stream" in: "query" description: | Stream attached streams from the time the request was made onwards. type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/attach/ws: get: summary: "Attach to a container via a websocket" operationId: "ContainerAttachWebsocket" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`. type: "string" - name: "logs" in: "query" description: "Return logs" type: "boolean" default: false - name: "stream" in: "query" description: "Return stream" type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/wait: post: summary: "Wait for a container" description: "Block until a container stops, then returns the exit code." 
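# Illustrative only: a Go sketch of the demultiplexing loop described in the attach
# endpoint's "Stream format" section above (read the 8-byte header, pick stdout or
# stderr from the first byte, then copy the payload).
#
#   package apidocs
#
#   import (
#       "encoding/binary"
#       "io"
#   )
#
#   // demux reads the multiplexed attach/logs stream and copies each frame's
#   // payload to stdout or stderr according to the STREAM_TYPE byte.
#   func demux(src io.Reader, stdout, stderr io.Writer) error {
#       var header [8]byte
#       for {
#           // 1. Read the 8-byte frame header.
#           if _, err := io.ReadFull(src, header[:]); err != nil {
#               if err == io.EOF {
#                   return nil
#               }
#               return err
#           }
#           // 2. Choose the destination from the first byte (1=stdout, 2=stderr).
#           dst := stdout
#           if header[0] == 2 {
#               dst = stderr
#           }
#           // 3. The last four bytes carry the payload size, big endian.
#           size := binary.BigEndian.Uint32(header[4:8])
#           // 4. Copy exactly that many bytes to the chosen stream, then loop.
#           if _, err := io.CopyN(dst, src, int64(size)); err != nil {
#               return err
#           }
#       }
#   }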
operationId: "ContainerWait" produces: ["application/json"] responses: 200: description: "The container has exit." schema: type: "object" title: "ContainerWaitResponse" description: "OK response to ContainerWait operation" required: [StatusCode] properties: StatusCode: description: "Exit code of the container" type: "integer" x-nullable: false Error: description: "container waiting error, if any" type: "object" properties: Message: description: "Details of an error" type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "condition" in: "query" description: | Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'. type: "string" default: "not-running" tags: ["Container"] /containers/{id}: delete: summary: "Remove a container" operationId: "ContainerDelete" responses: 204: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: | You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "v" in: "query" description: "Remove anonymous volumes associated with the container." type: "boolean" default: false - name: "force" in: "query" description: "If the container is running, kill it before removing it." type: "boolean" default: false - name: "link" in: "query" description: "Remove the specified link associated with the container." type: "boolean" default: false tags: ["Container"] /containers/{id}/archive: head: summary: "Get information about files in a container" description: | A response header `X-Docker-Container-Path-Stat` is returned, containing a base64 - encoded JSON object with some filesystem header information about the path. operationId: "ContainerArchiveInfo" responses: 200: description: "no error" headers: X-Docker-Container-Path-Stat: type: "string" description: | A base64 - encoded JSON object with some filesystem header information about the path 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." 
type: "string" tags: ["Container"] get: summary: "Get an archive of a filesystem resource in a container" description: "Get a tar archive of a resource in the filesystem of container id." operationId: "ContainerArchive" produces: ["application/x-tar"] responses: 200: description: "no error" 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." type: "string" tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" description: "Upload a tar archive to be extracted to a path in the filesystem of container id." operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: 200: description: "The content was extracted successfully" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such container or path does not exist inside the container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Path to a directory in the container to extract the archive’s contents into. " type: "string" - name: "noOverwriteDirNonDir" in: "query" description: | If `1`, `true`, or `True` then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. type: "string" - name: "copyUIDGID" in: "query" description: | If `1`, `true`, then it will copy UID/GID maps to the dest file or dir type: "string" - name: "inputStream" in: "body" required: true description: | The input stream must be a tar archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, or `xz`. schema: type: "string" format: "binary" tags: ["Container"] /containers/prune: post: summary: "Delete stopped containers" produces: - "application/json" operationId: "ContainerPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ContainerPruneResponse" properties: ContainersDeleted: description: "Container IDs that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /images/json: get: summary: "List Images" description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." operationId: "ImageList" produces: - "application/json" responses: 200: description: "Summary image data for the images matching the query" schema: type: "array" items: $ref: "#/definitions/ImageSummary" examples: application/json: - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ParentId: "" RepoTags: - "ubuntu:12.04" - "ubuntu:precise" RepoDigests: - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" Created: 1474925151 Size: 103579269 VirtualSize: 103579269 SharedSize: 0 Labels: {} Containers: 2 - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" ParentId: "" RepoTags: - "ubuntu:12.10" - "ubuntu:quantal" RepoDigests: - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" Created: 1403128455 Size: 172064416 VirtualSize: 172064416 SharedSize: 0 Labels: {} Containers: 5 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "all" in: "query" description: "Show all images. Only images from a final layer (no children) are shown by default." type: "boolean" default: false - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - `dangling=true` - `label=key` or `label="key=value"` of an image label - `reference`=(`<image-name>[:<tag>]`) - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) type: "string" - name: "shared-size" in: "query" description: "Compute and show shared size as a `SharedSize` field on each image." type: "boolean" default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false tags: ["Image"] /build: post: summary: "Build an image" description: | Build an image from a tar archive with a `Dockerfile` in it. The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. The build is canceled if the client drops the connection by quitting or being killed. 
operationId: "ImageBuild" consumes: - "application/octet-stream" produces: - "application/json" parameters: - name: "inputStream" in: "body" description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." schema: type: "string" format: "binary" - name: "dockerfile" in: "query" description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." type: "string" default: "Dockerfile" - name: "t" in: "query" description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." type: "string" - name: "extrahosts" in: "query" description: "Extra hosts to add to /etc/hosts" type: "string" - name: "remote" in: "query" description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." type: "string" - name: "q" in: "query" description: "Suppress verbose build output." type: "boolean" default: false - name: "nocache" in: "query" description: "Do not use the cache when building the image." type: "boolean" default: false - name: "cachefrom" in: "query" description: "JSON array of images used for build cache resolution." type: "string" - name: "pull" in: "query" description: "Attempt to pull the image even if an older image exists locally." type: "string" - name: "rm" in: "query" description: "Remove intermediate containers after a successful build." type: "boolean" default: true - name: "forcerm" in: "query" description: "Always remove intermediate containers, even upon failure." type: "boolean" default: false - name: "memory" in: "query" description: "Set memory limit for build." type: "integer" - name: "memswap" in: "query" description: "Total memory (memory + swap). Set as `-1` to disable swap." type: "integer" - name: "cpushares" in: "query" description: "CPU shares (relative weight)." type: "integer" - name: "cpusetcpus" in: "query" description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." type: "string" - name: "cpuperiod" in: "query" description: "The length of a CPU period in microseconds." type: "integer" - name: "cpuquota" in: "query" description: "Microseconds of CPU time that the container can get in a CPU period." type: "integer" - name: "buildargs" in: "query" description: > JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) type: "string" - name: "shmsize" in: "query" description: "Size of `/dev/shm` in bytes. The size must be greater than 0. 
If omitted the system uses 64MB." type: "integer" - name: "squash" in: "query" description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" type: "boolean" - name: "labels" in: "query" description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." type: "string" - name: "networkmode" in: "query" description: | Sets the networking mode for the run commands during build. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name or ID to which this container should connect to. type: "string" - name: "Content-type" in: "header" type: "string" enum: - "application/x-tar" default: "application/x-tar" - name: "X-Registry-Config" in: "header" description: | This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: ``` { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } ``` Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" - name: "target" in: "query" description: "Target build stage" type: "string" default: "" - name: "outputs" in: "query" description: "BuildKit output configuration" type: "string" default: "" responses: 200: description: "no error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /build/prune: post: summary: "Delete builder cache" produces: - "application/json" operationId: "BuildPrune" parameters: - name: "keep-storage" in: "query" description: "Amount of disk space in bytes to keep for cache" type: "integer" format: "int64" - name: "all" in: "query" type: "boolean" description: "Remove all types of build cache" - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - `id=<id>` - `parent=<id>` - `type=<string>` - `description=<string>` - `inuse` - `shared` - `private` responses: 200: description: "No error" schema: type: "object" title: "BuildPruneResponse" properties: CachesDeleted: type: "array" items: description: "ID of build cache object" type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /images/create: post: summary: "Create an image" description: "Create an image by either pulling it from a registry or importing it." 
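# Illustrative only: a Go sketch of pulling an image through this endpoint with an
# X-Registry-Auth header (a base64url-encoded JSON auth configuration, see the
# authentication section). The credentials are placeholders, and the client is assumed
# to be wired to the daemon socket as in the earlier sketches.
#
#   package apidocs
#
#   import (
#       "encoding/base64"
#       "encoding/json"
#       "net/http"
#   )
#
#   // pullImage triggers POST /images/create for the given image and tag.
#   func pullImage(client *http.Client, image, tag string) (*http.Response, error) {
#       authJSON, err := json.Marshal(map[string]string{
#           "username":      "janedoe",  // placeholder
#           "password":      "hunter2",  // placeholder
#           "serveraddress": "https://index.docker.io/v1/",
#       })
#       if err != nil {
#           return nil, err
#       }
#       req, err := http.NewRequest(http.MethodPost,
#           "http://localhost/v1.41/images/create?fromImage="+image+"&tag="+tag, nil)
#       if err != nil {
#           return nil, err
#       }
#       req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(authJSON))
#       return client.Do(req)
#   }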
operationId: "ImageCreate" consumes: - "text/plain" - "application/octet-stream" produces: - "application/json" responses: 200: description: "no error" 404: description: "repository does not exist or no read access" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "fromImage" in: "query" description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." type: "string" - name: "fromSrc" in: "query" description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." type: "string" - name: "repo" in: "query" description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." type: "string" - name: "tag" in: "query" description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." type: "string" - name: "message" in: "query" description: "Set commit message for imported image." type: "string" - name: "inputImage" in: "body" description: "Image content if the value `-` has been specified in fromSrc query parameter" schema: type: "string" required: false - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "changes" in: "query" description: | Apply `Dockerfile` instructions to the image that is created, for example: `changes=ENV DEBUG=true`. Note that `ENV DEBUG=true` should be URI component encoded. Supported `Dockerfile` instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` type: "array" items: type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" tags: ["Image"] /images/{name}/json: get: summary: "Inspect an image" description: "Return low-level information about an image." 
operationId: "ImageInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Image" examples: application/json: Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" Comment: "" Os: "linux" Architecture: "amd64" Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" ContainerConfig: Tty: false Hostname: "e611e15f9c9d" Domainname: "" AttachStdout: false PublishService: "" AttachStdin: false OpenStdin: false StdinOnce: false NetworkDisabled: false OnBuild: [] Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" User: "" WorkingDir: "" MacAddress: "" AttachStderr: false Labels: com.example.license: "GPL" com.example.version: "1.0" com.example.vendor: "Acme" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: - "/bin/sh" - "-c" - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" DockerVersion: "1.9.0-dev" VirtualSize: 188359297 Size: 0 Author: "" Created: "2015-09-10T08:30:53.26995814Z" GraphDriver: Name: "aufs" Data: {} RepoDigests: - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" RepoTags: - "example:1.0" - "example:latest" - "example:stable" Config: Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" NetworkDisabled: false OnBuild: [] StdinOnce: false PublishService: "" AttachStdin: false OpenStdin: false Domainname: "" AttachStdout: false Tty: false Hostname: "e611e15f9c9d" Cmd: - "/bin/bash" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Labels: com.example.vendor: "Acme" com.example.version: "1.0" com.example.license: "GPL" MacAddress: "" AttachStderr: false WorkingDir: "" User: "" RootFS: Type: "layers" Layers: - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Image"] /images/{name}/history: get: summary: "Get the history of an image" description: "Return parent layers of an image." 
operationId: "ImageHistory" produces: ["application/json"] responses: 200: description: "List of image layers" schema: type: "array" items: type: "object" x-go-name: HistoryResponseItem title: "HistoryResponseItem" description: "individual image layer information in response to ImageHistory operation" required: [Id, Created, CreatedBy, Tags, Size, Comment] properties: Id: type: "string" x-nullable: false Created: type: "integer" format: "int64" x-nullable: false CreatedBy: type: "string" x-nullable: false Tags: type: "array" items: type: "string" Size: type: "integer" format: "int64" x-nullable: false Comment: type: "string" x-nullable: false examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" Created: 1398108230 CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" Tags: - "ubuntu:lucid" - "ubuntu:10.04" Size: 182964289 Comment: "" - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" Created: 1398108222 CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <[email protected]> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" Tags: [] Size: 0 Comment: "" - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" Created: 1371157430 CreatedBy: "" Tags: - "scratch12:latest" - "scratch:latest" Size: 0 Comment: "Imported from -" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/{name}/push: post: summary: "Push an image" description: | Push an image to a registry. If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. The push is cancelled if the HTTP connection is closed. operationId: "ImagePush" consumes: - "application/octet-stream" responses: 200: description: "No error" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID." type: "string" required: true - name: "tag" in: "query" description: "The tag to associate with the image on the registry." type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" required: true tags: ["Image"] /images/{name}/tag: post: summary: "Tag an image" description: "Tag an image so that it becomes part of a repository." operationId: "ImageTag" responses: 201: description: "No error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID to tag." type: "string" required: true - name: "repo" in: "query" description: "The repository to tag in. For example, `someuser/someimage`." type: "string" - name: "tag" in: "query" description: "The name of the new tag." 
type: "string" tags: ["Image"] /images/{name}: delete: summary: "Remove an image" description: | Remove an image, along with any untagged parent images that were referenced by that image. Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. operationId: "ImageDelete" produces: ["application/json"] responses: 200: description: "The image was deleted successfully" schema: type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" examples: application/json: - Untagged: "3e2f21a89f" - Deleted: "3e2f21a89f" - Deleted: "53b4f83ac9" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true - name: "force" in: "query" description: "Remove the image even if it is being used by stopped containers or has other tags" type: "boolean" default: false - name: "noprune" in: "query" description: "Do not delete untagged parent images" type: "boolean" default: false tags: ["Image"] /images/search: get: summary: "Search images" description: "Search for an image on Docker Hub." operationId: "ImageSearch" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: type: "object" title: "ImageSearchResponseItem" properties: description: type: "string" is_official: type: "boolean" is_automated: type: "boolean" name: type: "string" star_count: type: "integer" examples: application/json: - description: "" is_official: false is_automated: false name: "wma55/u1210sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "jdswinbank/sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "vgauthier/sshd" star_count: 0 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "term" in: "query" description: "Term to search" type: "string" required: true - name: "limit" in: "query" description: "Maximum number of results to return" type: "integer" - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `is-automated=(true|false)` - `is-official=(true|false)` - `stars=<number>` Matches images that has at least 'number' stars. type: "string" tags: ["Image"] /images/prune: post: summary: "Delete unused images" produces: - "application/json" operationId: "ImagePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `dangling=<boolean>` When set to `true` (or `1`), prune only unused *and* untagged images. When set to `false` (or `0`), all unused images are pruned. - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ImagePruneResponse" properties: ImagesDeleted: description: "Images that were deleted" type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /auth: post: summary: "Check auth configuration" description: | Validate credentials for a registry and, if available, get an identity token for accessing the registry without password. operationId: "SystemAuth" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "An identity token was generated successfully." schema: type: "object" title: "SystemAuthResponse" required: [Status] properties: Status: description: "The status of the authentication" type: "string" x-nullable: false IdentityToken: description: "An opaque token used to authenticate a user after a successful login" type: "string" x-nullable: false examples: application/json: Status: "Login Succeeded" IdentityToken: "9cbaf023786cd7..." 204: description: "No error" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "authConfig" in: "body" description: "Authentication to check" schema: $ref: "#/definitions/AuthConfig" tags: ["System"] /info: get: summary: "Get system information" operationId: "SystemInfo" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/SystemInfo" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /version: get: summary: "Get version" description: "Returns the version of Docker that is running and various information about the system that Docker is running on." operationId: "SystemVersion" produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/SystemVersion" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /_ping: get: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." operationId: "SystemPing" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "OK" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" headers: Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" tags: ["System"] head: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." 
operationId: "SystemPingHead" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "(empty)" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /commit: post: summary: "Create a new image from a container" operationId: "ImageCommit" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "containerConfig" in: "body" description: "The container configuration" schema: $ref: "#/definitions/ContainerConfig" - name: "container" in: "query" description: "The ID or name of the container to commit" type: "string" - name: "repo" in: "query" description: "Repository name for the created image" type: "string" - name: "tag" in: "query" description: "Tag name for the create image" type: "string" - name: "comment" in: "query" description: "Commit message" type: "string" - name: "author" in: "query" description: "Author of the image (e.g., `John Hannibal Smith <[email protected]>`)" type: "string" - name: "pause" in: "query" description: "Whether to pause the container before committing" type: "boolean" default: true - name: "changes" in: "query" description: "`Dockerfile` instructions to apply while committing" type: "string" tags: ["Image"] /events: get: summary: "Monitor events" description: | Stream real-time events from the server. Various objects within Docker report events when something happens to them. Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` The Docker daemon reports these events: `reload` Services report these events: `create`, `update`, and `remove` Nodes report these events: `create`, `update`, and `remove` Secrets report these events: `create`, `update`, and `remove` Configs report these events: `create`, `update`, and `remove` The Builder reports `prune` events operationId: "SystemEvents" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/EventMessage" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "since" in: "query" description: "Show events created since this timestamp then stream new events." 
type: "string" - name: "until" in: "query" description: "Show events created until this timestamp then stop streaming." type: "string" - name: "filters" in: "query" description: | A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: - `config=<string>` config name or ID - `container=<string>` container name or ID - `daemon=<string>` daemon name or ID - `event=<string>` event type - `image=<string>` image name or ID - `label=<string>` image or container label - `network=<string>` network name or ID - `node=<string>` node ID - `plugin`=<string> plugin name or ID - `scope`=<string> local or swarm - `secret=<string>` secret name or ID - `service=<string>` service name or ID - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - `volume=<string>` volume name type: "string" tags: ["System"] /system/df: get: summary: "Get data usage information" operationId: "SystemDataUsage" responses: 200: description: "no error" schema: type: "object" title: "SystemDataUsageResponse" properties: LayersSize: type: "integer" format: "int64" Images: type: "array" items: $ref: "#/definitions/ImageSummary" Containers: type: "array" items: $ref: "#/definitions/ContainerSummary" Volumes: type: "array" items: $ref: "#/definitions/Volume" BuildCache: type: "array" items: $ref: "#/definitions/BuildCache" example: LayersSize: 1092588 Images: - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" ParentId: "" RepoTags: - "busybox:latest" RepoDigests: - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" Created: 1466724217 Size: 1092588 SharedSize: 0 VirtualSize: 1092588 Labels: {} Containers: 1 Containers: - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" Names: - "/top" Image: "busybox" ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" Command: "top" Created: 1472592424 Ports: [] SizeRootFs: 1092588 Labels: {} State: "exited" Status: "Exited (0) 56 minutes ago" HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: IPAMConfig: null Links: null Aliases: null NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" Gateway: "172.18.0.1" IPAddress: "172.18.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Mounts: [] Volumes: - Name: "my-volume" Driver: "local" Mountpoint: "/var/lib/docker/volumes/my-volume/_data" Labels: null Scope: "local" Options: null UsageData: Size: 10920104 RefCount: 2 BuildCache: - ID: "hw53o5aio51xtltp5xjp8v7fx" Parent: "" Type: "regular" Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" InUse: false Shared: true Size: 0 CreatedAt: "2021-06-28T13:31:01.474619385Z" LastUsedAt: "2021-07-07T22:02:32.738075951Z" UsageCount: 26 - ID: "ndlpt0hhvkqcdfkputsk4cq9c" Parent: "hw53o5aio51xtltp5xjp8v7fx" Type: "regular" Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: false Shared: true Size: 51 CreatedAt: "2021-06-28T13:31:03.002625487Z" LastUsedAt: "2021-07-07T22:02:32.773909517Z" UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "type" in: "query" 
description: | Object types, for which to compute and return data. type: "array" collectionFormat: multi items: type: "string" enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: summary: "Export an image" description: | Get a tarball containing all images and metadata for a repository. If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ```json { "hello-world": { "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" } } ``` operationId: "ImageGet" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/get: get: summary: "Export several images" description: | Get a tarball containing all images and metadata for several image repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageGetAll" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "names" in: "query" description: "Image names to filter by" type: "array" items: type: "string" tags: ["Image"] /images/load: post: summary: "Import images" description: | Load a set of images and tags into a repository. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageLoad" consumes: - "application/x-tar" produces: - "application/json" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "imagesTarball" in: "body" description: "Tar archive containing images" schema: type: "string" format: "binary" - name: "quiet" in: "query" description: "Suppress progress details during load." type: "boolean" default: false tags: ["Image"] /containers/{id}/exec: post: summary: "Create an exec instance" description: "Run a command inside a running container." 
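      # Illustrative workflow (not part of the spec): create an exec instance
      # here, then start it with `POST /exec/{id}/start` using the ID returned
      # in the response, and optionally inspect it with `GET /exec/{id}/json`.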
operationId: "ContainerExec" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is paused" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execConfig" in: "body" description: "Exec configuration" schema: type: "object" title: "ExecConfig" properties: AttachStdin: type: "boolean" description: "Attach to `stdin` of the exec command." AttachStdout: type: "boolean" description: "Attach to `stdout` of the exec command." AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." DetachKeys: type: "string" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Tty: type: "boolean" description: "Allocate a pseudo-TTY." Env: description: | A list of environment variables in the form `["VAR=value", ...]`. type: "array" items: type: "string" Cmd: type: "array" description: "Command to run, as a string or array of strings." items: type: "string" Privileged: type: "boolean" description: "Runs the exec process with extended privileges." default: false User: type: "string" description: | The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`. WorkingDir: type: "string" description: | The working directory for the exec process inside the container. example: AttachStdin: false AttachStdout: true AttachStderr: true DetachKeys: "ctrl-p,ctrl-q" Tty: false Cmd: - "date" Env: - "FOO=bar" - "BAZ=quux" required: true - name: "id" in: "path" description: "ID or name of container" type: "string" required: true tags: ["Exec"] /exec/{id}/start: post: summary: "Start an exec instance" description: | Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command. operationId: "ExecStart" consumes: - "application/json" produces: - "application/vnd.docker.raw-stream" responses: 200: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Container is stopped or paused" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execStartConfig" in: "body" schema: type: "object" title: "ExecStartConfig" properties: Detach: type: "boolean" description: "Detach from the command." Tty: type: "boolean" description: "Allocate a pseudo-TTY." example: Detach: false Tty: false - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /exec/{id}/resize: post: summary: "Resize an exec instance" description: | Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance. 
operationId: "ExecResize" responses: 201: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] /exec/{id}/json: get: summary: "Inspect an exec instance" description: "Return low-level information about an exec instance." operationId: "ExecInspect" produces: - "application/json" responses: 200: description: "No error" schema: type: "object" title: "ExecInspectResponse" properties: CanRemove: type: "boolean" DetachKeys: type: "string" ID: type: "string" Running: type: "boolean" ExitCode: type: "integer" ProcessConfig: $ref: "#/definitions/ProcessConfig" OpenStdin: type: "boolean" OpenStderr: type: "boolean" OpenStdout: type: "boolean" ContainerID: type: "string" Pid: type: "integer" description: "The system process ID for the exec process." examples: application/json: CanRemove: false ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" DetachKeys: "" ExitCode: 2 ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" OpenStderr: true OpenStdin: true OpenStdout: true ProcessConfig: arguments: - "-c" - "exit 2" entrypoint: "sh" privileged: false tty: true user: "1000" Running: false Pid: 42000 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /volumes: get: summary: "List volumes" operationId: "VolumeList" produces: ["application/json"] responses: 200: description: "Summary volume data that matches the query" schema: type: "object" title: "VolumeListResponse" description: "Volume list response" required: [Volumes, Warnings] properties: Volumes: type: "array" x-nullable: false description: "List of volumes" items: $ref: "#/definitions/Volume" Warnings: type: "array" x-nullable: false description: | Warnings that occurred when fetching the list of volumes. items: type: "string" examples: application/json: Volumes: - CreatedAt: "2017-07-19T12:00:26Z" Name: "tardis" Driver: "local" Mountpoint: "/var/lib/docker/volumes/tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" Options: device: "tmpfs" o: "size=100m,uid=1000" type: "tmpfs" Warnings: [] 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are not in use by a container. When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. - `driver=<volume-driver-name>` Matches volumes based on their driver. - `label=<key>` or `label=<key>:<value>` Matches volumes based on the presence of a `label` alone or a `label` and a value. - `name=<volume-name>` Matches all or part of a volume name. 
type: "string" format: "json" tags: ["Volume"] /volumes/create: post: summary: "Create a volume" operationId: "VolumeCreate" consumes: ["application/json"] produces: ["application/json"] responses: 201: description: "The volume was created successfully" schema: $ref: "#/definitions/Volume" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "volumeConfig" in: "body" required: true description: "Volume configuration" schema: type: "object" description: "Volume configuration" title: "VolumeConfig" properties: Name: description: | The new volume's name. If not specified, Docker generates a name. type: "string" x-nullable: false Driver: description: "Name of the volume driver to use." type: "string" default: "local" x-nullable: false DriverOpts: description: | A mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: Name: "tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Driver: "custom" tags: ["Volume"] /volumes/{name}: get: summary: "Inspect a volume" operationId: "VolumeInspect" produces: ["application/json"] responses: 200: description: "No error" schema: $ref: "#/definitions/Volume" 404: description: "No such volume" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" tags: ["Volume"] delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." operationId: "VolumeDelete" responses: 204: description: "The volume was removed" 404: description: "No such volume or volume driver" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Volume is in use and cannot be removed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" - name: "force" in: "query" description: "Force the removal of the volume" type: "boolean" default: false tags: ["Volume"] /volumes/prune: post: summary: "Delete unused volumes" produces: - "application/json" operationId: "VolumePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: description: "No error" schema: type: "object" title: "VolumePruneResponse" properties: VolumesDeleted: description: "Volumes that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Volume"] /networks: get: summary: "List networks" description: | Returns a list of networks. For details on the format, see the [network inspect endpoint](#operation/NetworkInspect). Note that it uses a different, smaller representation of a network than inspecting a single network. 
For example, the list of containers attached to the network is not propagated in API versions 1.28 and up. operationId: "NetworkList" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Network" examples: application/json: - Name: "bridge" Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" Created: "2016-10-19T06:21:00.416543526Z" Scope: "local" Driver: "bridge" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: - Subnet: "172.17.0.0/16" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" - Name: "none" Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "null" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} - Name: "host" Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "host" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all networks that are not in use by a container. When set to `false` (or `0`), only networks that are in use by one or more containers are returned. - `driver=<driver-name>` Matches a network's driver. - `id=<network-id>` Matches all or part of a network ID. - `label=<key>` or `label=<key>=<value>` of a network label. - `name=<network-name>` Matches all or part of a network name. - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
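          # Illustrative usage (not part of the spec): listing only user-defined
          # networks that use the bridge driver:
          #   GET /networks?filters={"driver":["bridge"],"type":["custom"]}
          # (the filters JSON must be URL-encoded in an actual request)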
type: "string" tags: ["Network"] /networks/{id}: get: summary: "Inspect a network" operationId: "NetworkInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Network" 404: description: "Network not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "verbose" in: "query" description: "Detailed inspect output for troubleshooting" type: "boolean" default: false - name: "scope" in: "query" description: "Filter the network by scope (swarm, global, or local)" type: "string" tags: ["Network"] delete: summary: "Remove a network" operationId: "NetworkDelete" responses: 204: description: "No error" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such network" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" tags: ["Network"] /networks/create: post: summary: "Create a network" operationId: "NetworkCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "No error" schema: type: "object" title: "NetworkCreateResponse" properties: Id: description: "The ID of the created network." type: "string" Warning: type: "string" example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "plugin not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "networkConfig" in: "body" description: "Network configuration" required: true schema: type: "object" title: "NetworkCreateRequest" required: ["Name"] properties: Name: description: "The network's name." type: "string" CheckDuplicate: description: | Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions. type: "boolean" Driver: description: "Name of the network driver plugin to use." type: "string" default: "bridge" Internal: description: "Restrict external access to the network." type: "boolean" Attachable: description: | Globally scoped network is manually attachable by regular containers from workers in swarm mode. type: "boolean" Ingress: description: | Ingress network is the network which provides the routing-mesh in swarm mode. type: "boolean" IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" Options: description: "Network specific options to be used by the drivers." type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: Name: "isolated_nw" CheckDuplicate: false Driver: "bridge" EnableIPv6: true IPAM: Driver: "default" Config: - Subnet: "172.20.0.0/16" IPRange: "172.20.10.0/24" Gateway: "172.20.10.11" - Subnet: "2001:db8:abcd::/64" Gateway: "2001:db8:abcd::1011" Options: foo: "bar" Internal: true Attachable: false Ingress: false Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" tags: ["Network"] /networks/{id}/connect: post: summary: "Connect a container to a network" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkConnectRequest" properties: Container: type: "string" description: "The ID or name of the container to connect to the network." EndpointConfig: $ref: "#/definitions/EndpointSettings" example: Container: "3613f73ba0e4" EndpointConfig: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" tags: ["Network"] /networks/{id}/disconnect: post: summary: "Disconnect a container from a network" operationId: "NetworkDisconnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkDisconnectRequest" properties: Container: type: "string" description: | The ID or name of the container to disconnect from the network. Force: type: "boolean" description: | Force the container to disconnect from the network. tags: ["Network"] /networks/prune: post: summary: "Delete unused networks" produces: - "application/json" operationId: "NetworkPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "NetworkPruneResponse" properties: NetworksDeleted: description: "Networks that were deleted" type: "array" items: type: "string" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Network"] /plugins: get: summary: "List plugins" operationId: "PluginList" description: "Returns information about installed plugins." produces: ["application/json"] responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Plugin" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - `capability=<capability name>` - `enable=<true>|<false>` tags: ["Plugin"] /plugins/privileges: get: summary: "Get plugin privileges" operationId: "GetPluginPrivileges" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: - "Plugin" /plugins/pull: post: summary: "Install a plugin" operationId: "PluginPull" description: | Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). produces: - "application/json" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | Remote reference for plugin to install. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "name" in: "query" description: | Local name for the pulled plugin. The `:latest` tag is optional, and is used as the default if omitted. required: false type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/{name}/json: get: summary: "Inspect a plugin" operationId: "PluginInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" tags: ["Plugin"] /plugins/{name}: delete: summary: "Remove a plugin" operationId: "PluginDelete" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "force" in: "query" description: | Disable the plugin before removing. This may result in issues if the plugin is in use by a container. type: "boolean" default: false tags: ["Plugin"] /plugins/{name}/enable: post: summary: "Enable a plugin" operationId: "PluginEnable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "timeout" in: "query" description: "Set the HTTP client timeout (in seconds)" type: "integer" default: 0 tags: ["Plugin"] /plugins/{name}/disable: post: summary: "Disable a plugin" operationId: "PluginDisable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: ["Plugin"] /plugins/{name}/upgrade: post: summary: "Upgrade a plugin" operationId: "PluginUpgrade" responses: 204: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "remote" in: "query" description: | Remote reference to upgrade to. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/create: post: summary: "Create a plugin" operationId: "PluginCreate" consumes: - "application/x-tar" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" - name: "tarContext" in: "body" description: "Path to tar containing plugin rootfs and manifest" schema: type: "string" format: "binary" tags: ["Plugin"] /plugins/{name}/push: post: summary: "Push a plugin" operationId: "PluginPush" description: | Push a plugin to the registry. parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" responses: 200: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /plugins/{name}/set: post: summary: "Configure a plugin" operationId: "PluginSet" consumes: - "application/json" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "body" in: "body" schema: type: "array" items: type: "string" example: ["DEBUG=1"] responses: 204: description: "No error" 404: description: "Plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /nodes: get: summary: "List nodes" operationId: "NodeList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Node" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). Available filters: - `id=<node id>` - `label=<engine label>` - `membership=`(`accepted`|`pending`)` - `name=<node name>` - `node.label=<node label>` - `role=`(`manager`|`worker`)` type: "string" tags: ["Node"] /nodes/{id}: get: summary: "Inspect a node" operationId: "NodeInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Node" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true tags: ["Node"] delete: summary: "Delete a node" operationId: "NodeDelete" responses: 200: description: "no error" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true - name: "force" in: "query" description: "Force remove a node from the swarm" default: false type: "boolean" tags: ["Node"] /nodes/{id}/update: post: summary: "Update a node" operationId: "NodeUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID of 
the node" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/NodeSpec" - name: "version" in: "query" description: | The version number of the node object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Node"] /swarm: get: summary: "Inspect swarm" operationId: "SwarmInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Swarm" 404: description: "no such swarm" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/init: post: summary: "Initialize a new swarm" operationId: "SwarmInit" produces: - "application/json" - "text/plain" responses: 200: description: "no error" schema: description: "The node ID" type: "string" example: "7v2t30z9blmxuhnyo6s4cpenp" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmInitRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used. type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. if no port is set or is set to 0, default port 4789 will be used. type: "integer" format: "uint32" DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" example: ["10.10.0.0/16", "20.20.0.0/16"] ForceNewCluster: description: "Force creation of a new swarm." type: "boolean" SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. 
type: "integer" format: "uint32" Spec: $ref: "#/definitions/SwarmSpec" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" DataPathPort: 4789 DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] SubnetSize: 24 ForceNewCluster: false Spec: Orchestration: {} Raft: {} Dispatcher: {} CAConfig: {} EncryptionConfig: AutoLockManagers: false tags: ["Swarm"] /swarm/join: post: summary: "Join an existing swarm" operationId: "SwarmJoin" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmJoinRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" RemoteAddrs: description: | Addresses of manager nodes already participating in the swarm. type: "array" items: type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" RemoteAddrs: - "node1:2377" JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" tags: ["Swarm"] /swarm/leave: post: summary: "Leave a swarm" operationId: "SwarmLeave" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "force" description: | Force leave swarm, even if this is the last manager or that it will break the cluster. in: "query" type: "boolean" default: false tags: ["Swarm"] /swarm/update: post: summary: "Update a swarm" operationId: "SwarmUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: $ref: "#/definitions/SwarmSpec" - name: "version" in: "query" description: | The version number of the swarm object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true - name: "rotateWorkerToken" in: "query" description: "Rotate the worker join token." type: "boolean" default: false - name: "rotateManagerToken" in: "query" description: "Rotate the manager join token." type: "boolean" default: false - name: "rotateManagerUnlockKey" in: "query" description: "Rotate the manager unlock key." type: "boolean" default: false tags: ["Swarm"] /swarm/unlockkey: get: summary: "Get the unlock key" operationId: "SwarmUnlockkey" consumes: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "UnlockKeyResponse" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/unlock: post: summary: "Unlock a locked manager" operationId: "SwarmUnlock" consumes: - "application/json" produces: - "application/json" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /services: get: summary: "List services" operationId: "ServiceList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Service" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - `id=<service id>` - `label=<service label>` - `mode=["replicated"|"global"]` - `name=<service name>` - name: "status" in: "query" type: "boolean" description: | Include service status, with count of running and desired tasks. tags: ["Service"] /services/create: post: summary: "Create a service" operationId: "ServiceCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: type: "object" title: "ServiceCreateResponse" properties: ID: description: "The ID of the created service." 
type: "string" Warning: description: "Optional warning message" type: "string" example: ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "network is not eligible for services" schema: $ref: "#/definitions/ErrorResponse" 409: description: "name conflicts with an existing service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "web" TaskTemplate: ContainerSpec: Image: "nginx:alpine" Mounts: - ReadOnly: true Source: "web-data" Target: "/usr/share/nginx/html" Type: "volume" VolumeOptions: DriverConfig: {} Labels: com.example.something: "something-value" Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] User: "33" DNSConfig: Nameservers: ["8.8.8.8"] Search: ["example.org"] Options: ["timeout:3"] Secrets: - File: Name: "www.example.org.key" UID: "33" GID: "33" Mode: 384 SecretID: "fpjqlhnwb19zds35k8wn80lq9" SecretName: "example_org_domain_key" LogDriver: Name: "json-file" Options: max-file: "3" max-size: "10M" Placement: {} Resources: Limits: MemoryBytes: 104857600 Reservations: {} RestartPolicy: Condition: "on-failure" Delay: 10000000000 MaxAttempts: 10 Mode: Replicated: Replicas: 4 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Ports: - Protocol: "tcp" PublishedPort: 8080 TargetPort: 80 Labels: foo: "bar" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}: get: summary: "Inspect a service" operationId: "ServiceInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Service" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "insertDefaults" in: "query" description: "Fill empty fields with default values." type: "boolean" default: false tags: ["Service"] delete: summary: "Delete a service" operationId: "ServiceDelete" responses: 200: description: "no error" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." 
required: true type: "string" tags: ["Service"] /services/{id}/update: post: summary: "Update a service" operationId: "ServiceUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/ServiceUpdateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "top" TaskTemplate: ContainerSpec: Image: "busybox" Args: - "top" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" - name: "version" in: "query" description: | The version number of the service object being updated. This is required to avoid conflicting writes. This version number should be the value as currently set on the service *before* the update. You can find the current version by calling `GET /services/{id}` required: true type: "integer" - name: "registryAuthFrom" in: "query" description: | If the `X-Registry-Auth` header is not specified, this parameter indicates where to find registry authorization credentials. type: "string" enum: ["spec", "previous-spec"] default: "spec" - name: "rollback" in: "query" description: | Set to this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case. type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}/logs: get: summary: "Get service logs" description: | Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "ServiceLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such service: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the service" type: "string" - name: "details" in: "query" description: "Show service context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Service"] /tasks: get: summary: "List tasks" operationId: "TaskList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Task" example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" - ID: "1yljwbmlr8er2waf8orvqpwms" Version: Index: 30 CreatedAt: "2016-06-07T21:07:30.019104782Z" UpdatedAt: "2016-06-07T21:07:30.231958098Z" Name: "hopeful_cori" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:30.202183143Z" State: "shutdown" Message: "shutdown" ContainerStatus: ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" DesiredState: "shutdown" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.5/16" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: - `desired-state=(running | shutdown | accepted)` - `id=<task id>` - `label=key` or `label="key=value"` - `name=<task name>` - `node=<node id or name>` - `service=<service name>` tags: ["Task"] /tasks/{id}: get: summary: "Inspect a task" operationId: "TaskInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Task" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID of the task" required: true type: "string" tags: ["Task"] /tasks/{id}/logs: get: summary: "Get task logs" description: | Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such task: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID of the task" type: "string" - name: "details" in: "query" description: "Show task context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Task"] /secrets: get: summary: "List secrets" operationId: "SecretList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Secret" example: - ID: "blt1owaxmitz71s9v5zh81zun" Version: Index: 85 CreatedAt: "2017-07-20T13:55:28.678958722Z" UpdatedAt: "2017-07-20T13:55:28.678958722Z" Spec: Name: "mysql-passwd" Labels: some.label: "some.value" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. 
Available filters: - `id=<secret id>` - `label=<key> or label=<key>=value` - `name=<secret name>` - `names=<secret name>` tags: ["Secret"] /secrets/create: post: summary: "Create a secret" operationId: "SecretCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/SecretSpec" - type: "object" example: Name: "app-key.crt" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" tags: ["Secret"] /secrets/{id}: get: summary: "Inspect a secret" operationId: "SecretInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Secret" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] delete: summary: "Delete a secret" operationId: "SecretDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] /secrets/{id}/update: post: summary: "Update a Secret" operationId: "SecretUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such secret" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the secret" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/SecretSpec" description: | The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values. - name: "version" in: "query" description: | The version number of the secret object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true tags: ["Secret"] /configs: get: summary: "List configs" operationId: "ConfigList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Config" example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "server.conf" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: - `id=<config id>` - `label=<key> or label=<key>=value` - `name=<config name>` - `names=<config name>` tags: ["Config"] /configs/create: post: summary: "Create a config" operationId: "ConfigCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/ConfigSpec" - type: "object" example: Name: "server.conf" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" tags: ["Config"] /configs/{id}: get: summary: "Inspect a config" operationId: "ConfigInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Config" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] delete: summary: "Delete a config" operationId: "ConfigDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] /configs/{id}/update: post: summary: "Update a Config" operationId: "ConfigUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such config" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the config" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/ConfigSpec" description: | The spec of the config to update. 
Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values. - name: "version" in: "query" description: | The version number of the config object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Config"] /distribution/{name}/json: get: summary: "Get image information from the registry" description: | Return image digest and platform information by contacting the registry. operationId: "DistributionInspect" produces: - "application/json" responses: 200: description: "descriptor and platform information" schema: $ref: "#/definitions/DistributionInspect" 401: description: "Failed authentication or no image found" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Distribution"] /session: post: summary: "Initialize interactive session" description: | Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. ### Hijacking This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. For example, the client sends this request to upgrade the connection: ``` POST /session HTTP/1.1 Upgrade: h2c Connection: Upgrade ``` The Docker daemon responds with a `101 UPGRADED` response follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Connection: Upgrade Upgrade: h2c ``` operationId: "Session" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hijacking successful" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Session"]
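The `/session` endpoint documented just above works by hijacking the HTTP/1.1 connection and upgrading it to an HTTP/2 transport. As a minimal illustrative sketch of that handshake (not part of the record above), assuming a daemon reachable over plain TCP at `localhost:2375`; real clients would normally go through the official SDK and the Unix socket:

```go
// Illustrative sketch only: performs the raw HTTP/1.1 upgrade handshake that
// the /session endpoint describes. The daemon address is an assumption.
package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:2375") // assumed daemon address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Request shown in the endpoint description: upgrade the connection to h2c.
	fmt.Fprint(conn, "POST /session HTTP/1.1\r\nHost: localhost\r\nUpgrade: h2c\r\nConnection: Upgrade\r\n\r\n")

	// On success the daemon answers "HTTP/1.1 101 UPGRADED" and the same
	// connection then carries the HTTP/2 stream.
	status, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		panic(err)
	}
	fmt.Print(status)
}
```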
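For the more conventional JSON endpoints above, a request is a plain HTTP call with a JSON body. A rough sketch of initializing a swarm through `POST /v1.42/swarm/init`, reusing the example values from the spec; the daemon address is again an assumption and error handling is minimal:

```go
// Illustrative sketch: calls POST /swarm/init with the example payload from
// the spec above. Assumes the daemon listens on tcp://localhost:2375.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := []byte(`{"ListenAddr": "0.0.0.0:2377", "AdvertiseAddr": "192.168.1.1:2377"}`)

	resp, err := http.Post("http://localhost:2375/v1.42/swarm/init",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On success (200) the response body is the new node ID as a JSON string.
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```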
thaJeztah
8fa6126f75f255805fb7f6ffa716d03b4cc7f76d
772e25fa9f00577ba9f6641530e5aad5ec5ff84c
Per my other comment, this would link to https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md, for example. Do you think that's worthwhile, or just overkill?
tianon
4,492
moby/moby
42,769
swagger: assorted fixes and updates
includes some follow-ups to https://github.com/moby/moby/pull/42621 - api/swagger: fix up event-types and move to definitions - api/swagger: rename PluginPrivilegeItem to PluginPrivilege - api/swagger: move DistributionInspect to definitions
null
2021-08-21 22:31:49+00:00
2021-09-02 21:23:49+00:00
api/swagger.yaml
# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. # # This is used for generating API documentation and the types used by the # client/server. See api/README.md for more information. # # Some style notes: # - This file is used by ReDoc, which allows GitHub Flavored Markdown in # descriptions. # - There is no maximum line length, for ease of editing and pretty diffs. # - operationIds are in the format "NounVerb", with a singular noun. swagger: "2.0" schemes: - "http" - "https" produces: - "application/json" - "text/plain" consumes: - "application/json" - "text/plain" basePath: "/v1.42" info: title: "Docker Engine API" version: "1.42" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { "message": "page not found" } ``` # Versioning The API is usually changed in each release, so API calls are versioned to ensure that clients don't break. To lock to a specific version of the API, you prefix the URL with its version, for example, call `/v1.30/info` to use the v1.30 version of the `/info` endpoint. If the API version specified in the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. If you omit the version-prefix, the current version of the API (v1.42) is used. For example, calling `/info` is the same as calling `/v1.42/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer daemons. # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) (JSON) string with the following structure: ``` { "username": "string", "password": "string", "email": "string", "serveraddress": "string" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { "identitytoken": "9cbaf023786cd7..." } ``` # The tags on paths define the menu sections in the ReDoc documentation, so # the usage of tags must make sense for that: # - They should be singular, not plural. # - There should not be too many tags, or the menu becomes unwieldy. 
For # example, it is preferable to add a path to the "System" tag instead of # creating a tag with a single path in it. # - The order of tags in this list defines the order in the menu. tags: # Primary objects - name: "Container" x-displayName: "Containers" description: | Create and manage containers. - name: "Image" x-displayName: "Images" - name: "Network" x-displayName: "Networks" description: | Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/network/) for more information. - name: "Volume" x-displayName: "Volumes" description: | Create and manage persistent storage that can be attached to containers. - name: "Exec" x-displayName: "Exec" description: | Run new commands inside running containers. Refer to the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. # Swarm things - name: "Swarm" x-displayName: "Swarm" description: | Engines can be clustered together in a swarm. Refer to the [swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - name: "Node" x-displayName: "Nodes" description: | Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Service" x-displayName: "Services" description: | Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Task" x-displayName: "Tasks" description: | A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - name: "Secret" x-displayName: "Secrets" description: | Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - name: "Config" x-displayName: "Configs" description: | Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. 
# System things - name: "Plugin" x-displayName: "Plugins" - name: "System" x-displayName: "System" definitions: Port: type: "object" description: "An open port on a container" required: [PrivatePort, Type] properties: IP: type: "string" format: "ip-address" description: "Host IP address that the container's port is mapped to" PrivatePort: type: "integer" format: "uint16" x-nullable: false description: "Port on the container" PublicPort: type: "integer" format: "uint16" description: "Port exposed on the host" Type: type: "string" x-nullable: false enum: ["tcp", "udp", "sctp"] example: PrivatePort: 8080 PublicPort: 80 Type: "tcp" MountPoint: type: "object" description: "A mount point inside a container" properties: Type: type: "string" Name: type: "string" Source: type: "string" Destination: type: "string" Driver: type: "string" Mode: type: "string" RW: type: "boolean" Propagation: type: "string" DeviceMapping: type: "object" description: "A device mapping between the host and container" properties: PathOnHost: type: "string" PathInContainer: type: "string" CgroupPermissions: type: "string" example: PathOnHost: "/dev/deviceName" PathInContainer: "/dev/deviceName" CgroupPermissions: "mrw" DeviceRequest: type: "object" description: "A request for devices to be sent to device drivers" properties: Driver: type: "string" example: "nvidia" Count: type: "integer" example: -1 DeviceIDs: type: "array" items: type: "string" example: - "0" - "1" - "GPU-fef8089b-4820-abfc-e83e-94318197576e" Capabilities: description: | A list of capabilities; an OR list of AND lists of capabilities. type: "array" items: type: "array" items: type: "string" example: # gpu AND nvidia AND compute - ["gpu", "nvidia", "compute"] Options: description: | Driver-specific options, specified as a key/value pairs. These options are passed directly to the driver. type: "object" additionalProperties: type: "string" ThrottleDevice: type: "object" properties: Path: description: "Device path" type: "string" Rate: description: "Rate" type: "integer" format: "int64" minimum: 0 Mount: type: "object" properties: Target: description: "Container path." type: "string" Source: description: "Mount source (e.g. a volume name, a host path)." type: "string" Type: description: | The mount type. Available types: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. type: "string" enum: - "bind" - "volume" - "tmpfs" - "npipe" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" Consistency: description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." type: "string" BindOptions: description: "Optional configuration for the `bind` type." type: "object" properties: Propagation: description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." type: "string" enum: - "private" - "rprivate" - "shared" - "rshared" - "slave" - "rslave" NonRecursive: description: "Disable recursive bind mount." type: "boolean" default: false VolumeOptions: description: "Optional configuration for the `volume` type." 
type: "object" properties: NoCopy: description: "Populate volume with data from the target." type: "boolean" default: false Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" DriverConfig: description: "Map of driver specific options" type: "object" properties: Name: description: "Name of the driver to use to create the volume." type: "string" Options: description: "key/value map of driver specific options." type: "object" additionalProperties: type: "string" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" properties: SizeBytes: description: "The size for the tmpfs mount in bytes." type: "integer" format: "int64" Mode: description: "The permission mode for the tmpfs mount in an integer." type: "integer" RestartPolicy: description: | The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. type: "object" properties: Name: type: "string" description: | - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" - "no" - "always" - "unless-stopped" - "on-failure" MaximumRetryCount: type: "integer" description: | If `on-failure` is used, the number of times to retry before giving up. Resources: description: "A container's resources (cgroups config, ulimits, etc)" type: "object" properties: # Applicable to all platforms CpuShares: description: | An integer value representing this container's relative CPU weight versus other containers. type: "integer" Memory: description: "Memory limit in bytes." type: "integer" format: "int64" default: 0 # Applicable to UNIX platforms CgroupParent: description: | Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. type: "string" BlkioWeight: description: "Block IO weight (relative weight)." type: "integer" minimum: 0 maximum: 1000 BlkioWeightDevice: description: | Block IO weight (relative device weight) in the form: ``` [{"Path": "device_path", "Weight": weight}] ``` type: "array" items: type: "object" properties: Path: type: "string" Weight: type: "integer" minimum: 0 BlkioDeviceReadBps: description: | Limit read rate (bytes per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteBps: description: | Limit write rate (bytes per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceReadIOps: description: | Limit read rate (IO per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteIOps: description: | Limit write rate (IO per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" CpuPeriod: description: "The length of a CPU period in microseconds." 
type: "integer" format: "int64" CpuQuota: description: | Microseconds of CPU time that the container can get in a CPU period. type: "integer" format: "int64" CpuRealtimePeriod: description: | The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpuRealtimeRuntime: description: | The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpusetCpus: description: | CPUs in which to allow execution (e.g., `0-3`, `0,1`). type: "string" example: "0-3" CpusetMems: description: | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. type: "string" Devices: description: "A list of devices to add to the container." type: "array" items: $ref: "#/definitions/DeviceMapping" DeviceCgroupRules: description: "a list of cgroup rules to apply to the container" type: "array" items: type: "string" example: "c 13:* rwm" DeviceRequests: description: | A list of requests for devices to be sent to device drivers. type: "array" items: $ref: "#/definitions/DeviceRequest" KernelMemory: description: | Kernel memory limit in bytes. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "integer" format: "int64" example: 209715200 KernelMemoryTCP: description: "Hard limit for kernel TCP buffer memory (in bytes)." type: "integer" format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" format: "int64" MemorySwap: description: | Total memory limit (memory + swap). Set as `-1` to enable unlimited swap. type: "integer" format: "int64" MemorySwappiness: description: | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. type: "integer" format: "int64" minimum: 0 maximum: 100 NanoCpus: description: "CPU quota in units of 10<sup>-9</sup> CPUs." type: "integer" format: "int64" OomKillDisable: description: "Disable OOM Killer for the container." type: "boolean" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true PidsLimit: description: | Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. type: "integer" format: "int64" x-nullable: true Ulimits: description: | A list of resource limits to set in the container. For example: ``` {"Name": "nofile", "Soft": 1024, "Hard": 2048} ``` type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" # Applicable to Windows CpuCount: description: | The number of usable CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" CpuPercent: description: | The usable percentage of the available CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. 
type: "integer" format: "int64" IOMaximumIOps: description: "Maximum IOps for the container system drive (Windows only)" type: "integer" format: "int64" IOMaximumBandwidth: description: | Maximum IO in bytes per second for the container system drive (Windows only). type: "integer" format: "int64" Limit: description: | An object describing a limit on resources which can be requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 Pids: description: | Limits the maximum number of PIDs in the container. Set `0` for unlimited. type: "integer" format: "int64" default: 0 example: 100 ResourceObject: description: | An object describing the resources which can be advertised by a node and requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 GenericResources: $ref: "#/definitions/GenericResources" GenericResources: description: | User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`). type: "array" items: type: "object" properties: NamedResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "string" DiscreteResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "integer" format: "int64" example: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" HealthConfig: description: "A test to perform to check that the container is healthy." type: "object" properties: Test: description: | The test to perform. Possible values are: - `[]` inherit healthcheck from image or parent image - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell type: "array" items: type: "string" Interval: description: | The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Retries: description: | The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. type: "integer" StartPeriod: description: | Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Health: description: | Health stores information about the container's healthcheck results. 
type: "object" properties: Status: description: | Status is one of `none`, `starting`, `healthy` or `unhealthy` - "none" Indicates there is no healthcheck - "starting" Starting indicates that the container is not yet ready - "healthy" Healthy indicates that the container is running correctly - "unhealthy" Unhealthy indicates that the container has a problem type: "string" enum: - "none" - "starting" - "healthy" - "unhealthy" example: "healthy" FailingStreak: description: "FailingStreak is the number of consecutive failures" type: "integer" example: 0 Log: type: "array" description: | Log contains the last few results (oldest first) items: x-nullable: true $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" properties: Start: description: | Date and time at which this check started in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "date-time" example: "2020-01-04T10:44:24.496525531Z" End: description: | Date and time at which this check ended in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2020-01-04T10:45:21.364524523Z" ExitCode: description: | ExitCode meanings: - `0` healthy - `1` unhealthy - `2` reserved (considered unhealthy) - other values: error running probe type: "integer" example: 0 Output: description: "Output from last check" type: "string" HostConfig: description: "Container configuration that depends on the host we are running on" allOf: - $ref: "#/definitions/Resources" - type: "object" properties: # Applicable to all platforms Binds: type: "array" description: | A list of volume bindings for this container. Each volume binding is a string in one of these forms: - `host-src:container-dest[:options]` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest[:options]` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. `options` is an optional, comma-delimited list of: - `nocopy` disables automatic copying of data from the container path to the volume. The `nocopy` flag only applies to named volumes. - `[ro|rw]` mounts a volume read-only or read-write, respectively. If omitted or set to `rw`, volumes are mounted read-write. - `[z|Z]` applies SELinux labels to allow or deny multiple containers to read and write to the same volume. - `z`: a _shared_ content label is applied to the content. This label indicates that multiple containers can share the volume content, for both reading and writing. - `Z`: a _private unshared_ label is applied to the content. This label indicates that only the current container can use a private volume. Labeling systems such as SELinux require proper labels to be placed on volume content that is mounted into a container. Without a label, the security system can prevent a container's processes from using the content. By default, the labels set by the host operating system are not modified. - `[[r]shared|[r]slave|[r]private]` specifies mount [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). This only applies to bind-mounted volumes, not internal volumes or named volumes. 
Mount propagation requires the source mount point (the location where the source directory is mounted in the host operating system) to have the correct propagation properties. For shared volumes, the source mount point must be set to `shared`. For slave volumes, the mount must be set to either `shared` or `slave`. items: type: "string" ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: type: "string" enum: - "json-file" - "syslog" - "journald" - "gelf" - "fluentd" - "awslogs" - "splunk" - "etwlogs" - "none" Config: type: "object" additionalProperties: type: "string" NetworkMode: type: "string" description: | Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name to which this container should connect to. PortBindings: $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: type: "boolean" description: | Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set. VolumeDriver: type: "string" description: "Driver that this container uses to mount volumes." VolumesFrom: type: "array" description: | A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`. items: type: "string" Mounts: description: | Specification for mounts to be added to the container. type: "array" items: $ref: "#/definitions/Mount" # Applicable to UNIX platforms CapAdd: type: "array" description: | A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'. items: type: "string" CapDrop: type: "array" description: | A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'. items: type: "string" CgroupnsMode: type: "string" enum: - "private" - "host" description: | cgroup namespace mode for the container. Possible values are: - `"private"`: the container runs in its own private cgroup namespace - `"host"`: use the host system's cgroup namespace If not specified, the daemon default is used, which can either be `"private"` or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." items: type: "string" DnsOptions: type: "array" description: "A list of DNS options." items: type: "string" DnsSearch: type: "array" description: "A list of DNS search domains." items: type: "string" ExtraHosts: type: "array" description: | A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. items: type: "string" GroupAdd: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" IpcMode: type: "string" description: | IPC sharing mode for the container. Possible values are: - `"none"`: own private IPC namespace, with /dev/shm not mounted - `"private"`: own private IPC namespace - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - `"container:<name|id>"`: join another (shareable) container's IPC namespace - `"host"`: use the host system's IPC namespace If not specified, daemon default is used, which can either be `"private"` or `"shareable"`, depending on daemon version and configuration. 
Cgroup: type: "string" description: "Cgroup to use for the container." Links: type: "array" description: | A list of links for the container in the form `container_name:alias`. items: type: "string" OomScoreAdj: type: "integer" description: | An integer value containing the score given to the container in order to tune OOM killer preferences. example: 500 PidMode: type: "string" description: | Set the PID (Process) Namespace mode for the container. It can be either: - `"container:<name|id>"`: joins another container's PID namespace - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" description: "Gives the container full access to the host." PublishAllPorts: type: "boolean" description: | Allocates an ephemeral host port for all of a container's exposed ports. Ports are de-allocated when the container stops and allocated when the container starts. The allocated port might be changed when restarting the container. The port is selected from the ephemeral port range that depends on the kernel. For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" description: "A list of string values to customize labels for MLS systems, such as SELinux." items: type: "string" StorageOpt: type: "object" description: | Storage driver options for this container, in the form `{"size": "120G"}`. additionalProperties: type: "string" Tmpfs: type: "object" description: | A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: ``` { "/run": "rw,noexec,nosuid,size=65536k" } ``` additionalProperties: type: "string" UTSMode: type: "string" description: "UTS namespace to use for the container." UsernsMode: type: "string" description: | Sets the usernamespace mode for the container when usernamespace remapping option is enabled. ShmSize: type: "integer" description: | Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. minimum: 0 Sysctls: type: "object" description: | A list of kernel parameters (sysctls) to set in the container. For example: ``` {"net.ipv4.ip_forward": "1"} ``` additionalProperties: type: "string" Runtime: type: "string" description: "Runtime to use with this container." # Applicable to Windows ConsoleSize: type: "array" description: | Initial console size, as an `[height, width]` array. (Windows only) minItems: 2 maxItems: 2 items: type: "integer" minimum: 0 Isolation: type: "string" description: | Isolation technology of the container. (Windows only) enum: - "default" - "process" - "hyperv" MaskedPaths: type: "array" description: | The list of paths to be masked inside the container (this overrides the default set of paths). items: type: "string" ReadonlyPaths: type: "array" description: | The list of paths to be set as read-only inside the container (this overrides the default set of paths). items: type: "string" ContainerConfig: description: "Configuration for a container that is portable between hosts" type: "object" properties: Hostname: description: "The hostname to use for the container, as a valid RFC 1123 hostname." type: "string" Domainname: description: "The domain name to use for the container." type: "string" User: description: "The user that commands are run as inside the container." type: "string" AttachStdin: description: "Whether to attach to `stdin`." 
type: "boolean" default: false AttachStdout: description: "Whether to attach to `stdout`." type: "boolean" default: true AttachStderr: description: "Whether to attach to `stderr`." type: "boolean" default: true ExposedPorts: description: | An object mapping ports to an empty object in the form: `{"<port>/<tcp|udp|sctp>": {}}` type: "object" additionalProperties: type: "object" enum: - {} default: {} Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. type: "boolean" default: false OpenStdin: description: "Open `stdin`" type: "boolean" default: false StdinOnce: description: "Close `stdin` after one attached client disconnects" type: "boolean" default: false Env: description: | A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. type: "array" items: type: "string" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" Image: description: | The name of the image to use when creating the container/ type: "string" Volumes: description: | An object mapping mount point paths inside the container to empty objects. type: "object" additionalProperties: type: "object" enum: - {} default: {} WorkingDir: description: "The working directory for commands to run in." type: "string" Entrypoint: description: | The entry point for the container as a string or an array of strings. If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). type: "array" items: type: "string" NetworkDisabled: description: "Disable networking for the container." type: "boolean" MacAddress: description: "MAC address of the container." type: "string" OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" items: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" default: "SIGTERM" StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" items: type: "string" NetworkingConfig: description: | NetworkingConfig represents the container's networking configuration for each of its interfaces. It is used for the networking configs specified in the `docker create` and `docker network connect` commands. type: "object" properties: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from # /definitions/EndpointSettings, because containers/create currently # does not support attaching to multiple networks, so the example request # would be confusing if it showed that multiple networks can be contained # in the EndpointsConfig. 
# TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: description: Name of the network's bridge (for example, `docker0`). type: "string" example: "docker0" SandboxID: description: SandboxID uniquely represents a container's network stack. type: "string" example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. type: "boolean" example: false LinkLocalIPv6Address: description: IPv6 unicast address using the link-local prefix. type: "string" example: "fe80::42:acff:fe11:1" LinkLocalIPv6PrefixLen: description: Prefix length of the IPv6 unicast address. type: "integer" example: "64" Ports: $ref: "#/definitions/PortMap" SandboxKey: description: SandboxKey identifies the sandbox type: "string" example: "/var/run/docker/netns/8ab54b426c38" # TODO is SecondaryIPAddresses actually used? SecondaryIPAddresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO is SecondaryIPv6Addresses actually used? SecondaryIPv6Addresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO properties below are part of DefaultNetworkSettings, which is # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 EndpointID: description: | EndpointID uniquely represents a service endpoint in a Sandbox. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.1" GlobalIPv6Address: description: | Global IPv6 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. 
This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 64 IPAddress: description: | IPv4 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address for this network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8:2::100" MacAddress: description: | MAC address for the container on the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "02:42:ac:11:00:04" Networks: description: | Information about all networks that the container is connected to. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Address: description: Address represents an IPv4 or IPv6 IP address. type: "object" properties: Addr: description: IP address. type: "string" PrefixLen: description: Mask length of the IP address. type: "integer" PortMap: description: | PortMap describes the mapping of container ports to host ports, using the container's port-number and protocol as key in the format `<port>/<protocol>`, for example, `80/udp`. If a container's port is mapped for multiple protocols, separate entries are added to the mapping table. type: "object" additionalProperties: type: "array" x-nullable: true items: $ref: "#/definitions/PortBinding" example: "443/tcp": - HostIp: "127.0.0.1" HostPort: "4443" "80/tcp": - HostIp: "0.0.0.0" HostPort: "80" - HostIp: "0.0.0.0" HostPort: "8080" "80/udp": - HostIp: "0.0.0.0" HostPort: "80" "53/udp": - HostIp: "0.0.0.0" HostPort: "53" "2377/tcp": null PortBinding: description: | PortBinding represents a binding between a host IP address and a host port. type: "object" properties: HostIp: description: "Host IP address that the container's port is mapped to." type: "string" example: "127.0.0.1" HostPort: description: "Host port number that the container's port is mapped to." type: "string" example: "4443" GraphDriverData: description: "Information about a container's graph driver." 
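  # Hedged sketch of how the PortMap/PortBinding shapes above are supplied as the
  # HostConfig `PortBindings` field in a `POST /containers/create` body
  # (ports and addresses are illustrative):
  #
  #   {
  #     "Image": "nginx:alpine",
  #     "ExposedPorts": { "80/tcp": {} },
  #     "HostConfig": {
  #       "PortBindings": { "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "8080" } ] }
  #     }
  #   }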
type: "object" required: [Name, Data] properties: Name: type: "string" x-nullable: false Data: type: "object" x-nullable: false additionalProperties: type: "string" Image: type: "object" required: - Id - Parent - Comment - Created - Container - DockerVersion - Author - Architecture - Os - Size - VirtualSize - GraphDriver - RootFS properties: Id: type: "string" x-nullable: false RepoTags: type: "array" items: type: "string" RepoDigests: type: "array" items: type: "string" Parent: type: "string" x-nullable: false Comment: type: "string" x-nullable: false Created: type: "string" x-nullable: false Container: type: "string" x-nullable: false ContainerConfig: $ref: "#/definitions/ContainerConfig" DockerVersion: type: "string" x-nullable: false Author: type: "string" x-nullable: false Config: $ref: "#/definitions/ContainerConfig" Architecture: type: "string" x-nullable: false Os: type: "string" x-nullable: false OsVersion: type: "string" Size: type: "integer" format: "int64" x-nullable: false VirtualSize: type: "integer" format: "int64" x-nullable: false GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: type: "object" required: [Type] properties: Type: type: "string" x-nullable: false Layers: type: "array" items: type: "string" BaseLayer: type: "string" Metadata: type: "object" properties: LastTagTime: type: "string" format: "dateTime" ImageSummary: type: "object" required: - Id - ParentId - RepoTags - RepoDigests - Created - Size - SharedSize - VirtualSize - Labels - Containers properties: Id: type: "string" x-nullable: false ParentId: type: "string" x-nullable: false RepoTags: type: "array" x-nullable: false items: type: "string" RepoDigests: type: "array" x-nullable: false items: type: "string" Created: type: "integer" x-nullable: false Size: type: "integer" x-nullable: false SharedSize: type: "integer" x-nullable: false VirtualSize: type: "integer" x-nullable: false Labels: type: "object" x-nullable: false additionalProperties: type: "string" Containers: x-nullable: false type: "integer" AuthConfig: type: "object" properties: username: type: "string" password: type: "string" email: type: "string" serveraddress: type: "string" example: username: "hannibal" password: "xxxx" serveraddress: "https://index.docker.io/v1/" ProcessConfig: type: "object" properties: privileged: type: "boolean" user: type: "string" tty: type: "boolean" entrypoint: type: "string" arguments: type: "array" items: type: "string" Volume: type: "object" required: [Name, Driver, Mountpoint, Labels, Scope, Options] properties: Name: type: "string" description: "Name of the volume." x-nullable: false Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false Mountpoint: type: "string" description: "Mount path of the volume on the host." x-nullable: false CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." Status: type: "object" description: | Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature. additionalProperties: type: "object" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" Scope: type: "string" description: | The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. 
default: "local" x-nullable: false enum: ["local", "global"] Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" UsageData: type: "object" x-nullable: true required: [Size, RefCount] description: | Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. properties: Size: type: "integer" default: -1 description: | Amount of disk space used by the volume (in bytes). This information is only available for volumes created with the `"local"` volume driver. For volumes created with other volume drivers, this field is set to `-1` ("not available") x-nullable: false RefCount: type: "integer" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. x-nullable: false example: Name: "tardis" Driver: "custom" Mountpoint: "/var/lib/docker/volumes/tardis" Status: hello: "world" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" CreatedAt: "2016-06-07T20:31:11.853781916Z" Network: type: "object" properties: Name: type: "string" Id: type: "string" Created: type: "string" format: "dateTime" Scope: type: "string" Driver: type: "string" EnableIPv6: type: "boolean" IPAM: $ref: "#/definitions/IPAM" Internal: type: "boolean" Attachable: type: "boolean" Ingress: type: "boolean" Containers: type: "object" additionalProperties: $ref: "#/definitions/NetworkContainer" Options: type: "object" additionalProperties: type: "string" Labels: type: "object" additionalProperties: type: "string" example: Name: "net01" Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" Created: "2016-10-19T04:33:30.360899459Z" Scope: "local" Driver: "bridge" EnableIPv6: false IPAM: Driver: "default" Config: - Subnet: "172.19.0.0/16" Gateway: "172.19.0.1" Options: foo: "bar" Internal: false Attachable: false Ingress: false Containers: 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: Name: "test" EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: "02:42:ac:13:00:02" IPv4Address: "172.19.0.2/16" IPv6Address: "" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" IPAM: type: "object" properties: Driver: description: "Name of the IPAM driver to use." type: "string" default: "default" Config: description: | List of IPAM configuration options, specified as a map: ``` {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} ``` type: "array" items: type: "object" additionalProperties: type: "string" Options: description: "Driver-specific options, specified as a map." 
type: "object" additionalProperties: type: "string" NetworkContainer: type: "object" properties: Name: type: "string" EndpointID: type: "string" MacAddress: type: "string" IPv4Address: type: "string" IPv6Address: type: "string" BuildInfo: type: "object" properties: id: type: "string" stream: type: "string" error: type: "string" errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" aux: $ref: "#/definitions/ImageID" BuildCache: type: "object" properties: ID: type: "string" Parent: type: "string" Type: type: "string" Description: type: "string" InUse: type: "boolean" Shared: type: "boolean" Size: description: | Amount of disk space used by the build cache (in bytes). type: "integer" CreatedAt: description: | Date and time at which the build cache was created in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" LastUsedAt: description: | Date and time at which the build cache was last used in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" x-nullable: true example: "2017-08-09T07:09:37.632105588Z" UsageCount: type: "integer" ImageID: type: "object" description: "Image ID or Digest" properties: ID: type: "string" example: ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" CreateImageInfo: type: "object" properties: id: type: "string" error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" PushImageInfo: type: "object" properties: error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" ErrorDetail: type: "object" properties: code: type: "integer" message: type: "string" ProgressDetail: type: "object" properties: current: type: "integer" total: type: "integer" ErrorResponse: description: "Represents an error." type: "object" required: ["message"] properties: message: description: "The error message." type: "string" x-nullable: false example: message: "Something went wrong." IdResponse: description: "Response to an API call that returns just an Id" type: "object" required: ["Id"] properties: Id: description: "The id of the newly created object." type: "string" x-nullable: false EndpointSettings: description: "Configuration for a network endpoint." type: "object" properties: # Configurations IPAMConfig: $ref: "#/definitions/EndpointIPAMConfig" Links: type: "array" items: type: "string" example: - "container_1" - "container_2" Aliases: type: "array" items: type: "string" example: - "server_x" - "server_y" # Operational data NetworkID: description: | Unique ID of the network. type: "string" example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" EndpointID: description: | Unique ID for the service endpoint in a Sandbox. type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for this network. type: "string" example: "172.17.0.1" IPAddress: description: | IPv4 address. type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address. type: "string" example: "2001:db8:2::100" GlobalIPv6Address: description: | Global IPv6 address. 
type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. type: "integer" format: "int64" example: 64 MacAddress: description: | MAC address for the endpoint on this network. type: "string" example: "02:42:ac:11:00:04" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" x-nullable: true additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" EndpointIPAMConfig: description: | EndpointIPAMConfig represents an endpoint's IPAM configuration. type: "object" x-nullable: true properties: IPv4Address: type: "string" example: "172.20.30.33" IPv6Address: type: "string" example: "2001:db8:abcd::3033" LinkLocalIPs: type: "array" items: type: "string" example: - "169.254.34.68" - "fe80::3468" PluginMount: type: "object" x-nullable: false required: [Name, Description, Settable, Source, Destination, Type, Options] properties: Name: type: "string" x-nullable: false example: "some-mount" Description: type: "string" x-nullable: false example: "This is a mount that's used by the plugin." Settable: type: "array" items: type: "string" Source: type: "string" example: "/var/lib/docker/plugins/" Destination: type: "string" x-nullable: false example: "/mnt/state" Type: type: "string" x-nullable: false example: "bind" Options: type: "array" items: type: "string" example: - "rbind" - "rw" PluginDevice: type: "object" required: [Name, Description, Settable, Path] x-nullable: false properties: Name: type: "string" x-nullable: false Description: type: "string" x-nullable: false Settable: type: "array" items: type: "string" Path: type: "string" example: "/dev/fuse" PluginEnv: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" Description: x-nullable: false type: "string" Settable: type: "array" items: type: "string" Value: type: "string" PluginInterfaceType: type: "object" x-nullable: false required: [Prefix, Capability, Version] properties: Prefix: type: "string" x-nullable: false Capability: type: "string" x-nullable: false Version: type: "string" x-nullable: false PluginPrivilegeItem: description: | Describes a permission the user has to accept upon installing the plugin. type: "object" properties: Name: type: "string" example: "network" Description: type: "string" Value: type: "array" items: type: "string" example: - "host" Plugin: description: "A plugin for the Engine API" type: "object" required: [Settings, Enabled, Config, Name] properties: Id: type: "string" example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" Name: type: "string" x-nullable: false example: "tiborvass/sample-volume-plugin" Enabled: description: True if the plugin is running. False if the plugin is not running, only installed. type: "boolean" x-nullable: false example: true Settings: description: "Settings that can be modified by users." 
type: "object" x-nullable: false required: [Args, Devices, Env, Mounts] properties: Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: type: "string" example: - "DEBUG=0" Args: type: "array" items: type: "string" Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PluginReference: description: "plugin remote reference used to push/pull the plugin" type: "string" x-nullable: false example: "localhost:5000/tiborvass/sample-volume-plugin:latest" Config: description: "The config of a plugin." type: "object" x-nullable: false required: - Description - Documentation - Interface - Entrypoint - WorkDir - Network - Linux - PidHost - PropagatedMount - IpcHost - Mounts - Env - Args properties: DockerVersion: description: "Docker Version used to create the plugin" type: "string" x-nullable: false example: "17.06.0-ce" Description: type: "string" x-nullable: false example: "A sample volume plugin for Docker" Documentation: type: "string" x-nullable: false example: "https://docs.docker.com/engine/extend/plugins/" Interface: description: "The interface between Docker and the plugin" x-nullable: false type: "object" required: [Types, Socket] properties: Types: type: "array" items: $ref: "#/definitions/PluginInterfaceType" example: - "docker.volumedriver/1.0" Socket: type: "string" x-nullable: false example: "plugins.sock" ProtocolScheme: type: "string" example: "some.protocol/v1.0" description: "Protocol to use for clients connecting to the plugin." enum: - "" - "moby.plugins.http/v1" Entrypoint: type: "array" items: type: "string" example: - "/usr/bin/sample-volume-plugin" - "/data" WorkDir: type: "string" x-nullable: false example: "/bin/" User: type: "object" x-nullable: false properties: UID: type: "integer" format: "uint32" example: 1000 GID: type: "integer" format: "uint32" example: 1000 Network: type: "object" x-nullable: false required: [Type] properties: Type: x-nullable: false type: "string" example: "host" Linux: type: "object" x-nullable: false required: [Capabilities, AllowAllDevices, Devices] properties: Capabilities: type: "array" items: type: "string" example: - "CAP_SYS_ADMIN" - "CAP_SYSLOG" AllowAllDevices: type: "boolean" x-nullable: false example: false Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PropagatedMount: type: "string" x-nullable: false example: "/mnt/volumes" IpcHost: type: "boolean" x-nullable: false example: false PidHost: type: "boolean" x-nullable: false example: false Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: $ref: "#/definitions/PluginEnv" example: - Name: "DEBUG" Description: "If set, prints debug messages" Settable: null Value: "0" Args: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" example: "args" Description: x-nullable: false type: "string" example: "command line arguments" Settable: type: "array" items: type: "string" Value: type: "array" items: type: "string" rootfs: type: "object" properties: type: type: "string" example: "layers" diff_ids: type: "array" items: type: "string" example: - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ObjectVersion: description: | The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. 
The client must send the version number along with the modified specification when updating these objects. This approach ensures safe concurrency and determinism in that the change on the object may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not unintentionally overwrite each other. type: "object" properties: Index: type: "integer" format: "uint64" example: 373531 NodeSpec: type: "object" properties: Name: description: "Name for the node." type: "string" example: "my-node" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Role: description: "Role of the node." type: "string" enum: - "worker" - "manager" example: "manager" Availability: description: "Availability of the node." type: "string" enum: - "active" - "pause" - "drain" example: "active" example: Availability: "active" Name: "node-name" Role: "manager" Labels: foo: "bar" Node: type: "object" properties: ID: type: "string" example: "24ifsmvkjbyhk" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the node was added to the swarm in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the node was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/NodeSpec" Description: $ref: "#/definitions/NodeDescription" Status: $ref: "#/definitions/NodeStatus" ManagerStatus: $ref: "#/definitions/ManagerStatus" NodeDescription: description: | NodeDescription encapsulates the properties of the Node as reported by the agent. type: "object" properties: Hostname: type: "string" example: "bf3067039e47" Platform: $ref: "#/definitions/Platform" Resources: $ref: "#/definitions/ResourceObject" Engine: $ref: "#/definitions/EngineDescription" TLSInfo: $ref: "#/definitions/TLSInfo" Platform: description: | Platform represents the platform (Arch/OS). type: "object" properties: Architecture: description: | Architecture represents the hardware architecture (for example, `x86_64`). type: "string" example: "x86_64" OS: description: | OS represents the Operating System (for example, `linux` or `windows`). type: "string" example: "linux" EngineDescription: description: "EngineDescription provides information about an engine." 
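  # Hedged usage sketch: the ObjectVersion above is the value passed back when
  # updating a node, e.g. `POST /nodes/{id}/update?version=373531` with a
  # NodeSpec body such as (illustrative values):
  #
  #   {
  #     "Name": "node-name",
  #     "Role": "manager",
  #     "Availability": "drain",
  #     "Labels": { "foo": "bar" }
  #   }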
type: "object" properties: EngineVersion: type: "string" example: "17.06.0" Labels: type: "object" additionalProperties: type: "string" example: foo: "bar" Plugins: type: "array" items: type: "object" properties: Type: type: "string" Name: type: "string" example: - Type: "Log" Name: "awslogs" - Type: "Log" Name: "fluentd" - Type: "Log" Name: "gcplogs" - Type: "Log" Name: "gelf" - Type: "Log" Name: "journald" - Type: "Log" Name: "json-file" - Type: "Log" Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" Name: "syslog" - Type: "Network" Name: "bridge" - Type: "Network" Name: "host" - Type: "Network" Name: "ipvlan" - Type: "Network" Name: "macvlan" - Type: "Network" Name: "null" - Type: "Network" Name: "overlay" - Type: "Volume" Name: "local" - Type: "Volume" Name: "localhost:5000/vieux/sshfs:latest" - Type: "Volume" Name: "vieux/sshfs:latest" TLSInfo: description: | Information about the issuer of leaf TLS certificates and the trusted root CA certificate. type: "object" properties: TrustRoot: description: | The root CA certificate(s) that are used to validate leaf TLS certificates. type: "string" CertIssuerSubject: description: The base64-url-safe-encoded raw subject bytes of the issuer. type: "string" CertIssuerPublicKey: description: | The base64-url-safe-encoded raw public key bytes of the issuer. type: "string" example: TrustRoot: | -----BEGIN CERTIFICATE----- MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H -----END CERTIFICATE----- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" NodeStatus: description: | NodeStatus represents the status of a node. It provides the current status of the node, as seen by the manager. type: "object" properties: State: $ref: "#/definitions/NodeState" Message: type: "string" example: "" Addr: description: "IP address of the node." type: "string" example: "172.17.0.2" NodeState: description: "NodeState represents the state of a node." type: "string" enum: - "unknown" - "down" - "ready" - "disconnected" example: "ready" ManagerStatus: description: | ManagerStatus represents the status of a manager. It provides the current status of a node's manager component, if the node is a manager. x-nullable: true type: "object" properties: Leader: type: "boolean" default: false example: true Reachability: $ref: "#/definitions/Reachability" Addr: description: | The IP address and port at which the manager is reachable. type: "string" example: "10.0.0.46:2377" Reachability: description: "Reachability represents the reachability of a node." type: "string" enum: - "unknown" - "unreachable" - "reachable" example: "reachable" SwarmSpec: description: "User modifiable swarm configuration." type: "object" properties: Name: description: "Name of the swarm." type: "string" example: "default" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: com.example.corp.type: "production" com.example.corp.department: "engineering" Orchestration: description: "Orchestration configuration." type: "object" x-nullable: true properties: TaskHistoryRetentionLimit: description: | The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks. type: "integer" format: "int64" example: 10 Raft: description: "Raft configuration." type: "object" properties: SnapshotInterval: description: "The number of log entries between snapshots." type: "integer" format: "uint64" example: 10000 KeepOldSnapshots: description: | The number of snapshots to keep beyond the current snapshot. type: "integer" format: "uint64" LogEntriesForSlowFollowers: description: | The number of log entries to keep around to sync up slow followers after a snapshot is created. type: "integer" format: "uint64" example: 500 ElectionTick: description: | The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 3 HeartbeatTick: description: | The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 1 Dispatcher: description: "Dispatcher configuration." type: "object" x-nullable: true properties: HeartbeatPeriod: description: | The delay for an agent to send a heartbeat to the dispatcher. type: "integer" format: "int64" example: 5000000000 CAConfig: description: "CA configuration." type: "object" x-nullable: true properties: NodeCertExpiry: description: "The duration node certificates are issued for." type: "integer" format: "int64" example: 7776000000000000 ExternalCAs: description: | Configuration for forwarding signing requests to an external certificate authority. type: "array" items: type: "object" properties: Protocol: description: | Protocol for communication with the external CA (currently only `cfssl` is supported). type: "string" enum: - "cfssl" default: "cfssl" URL: description: | URL where certificate signing requests should be sent. type: "string" Options: description: | An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver. type: "object" additionalProperties: type: "string" CACert: description: | The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided). type: "string" SigningCACert: description: | The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format. type: "string" SigningCAKey: description: | The desired signing CA key for all swarm node TLS leaf certificates, in PEM format. type: "string" ForceRotate: description: | An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey` format: "uint64" type: "integer" EncryptionConfig: description: "Parameters related to encryption-at-rest." type: "object" properties: AutoLockManagers: description: | If set, generate a key and use it to lock data stored on the managers. 
type: "boolean" example: false TaskDefaults: description: "Defaults for creating tasks in this cluster." type: "object" properties: LogDriver: description: | The log driver to use for tasks created in the orchestrator if unspecified by a service. Updating this value only affects new tasks. Existing tasks continue to use their previously configured log driver until recreated. type: "object" properties: Name: description: | The log driver to use as a default for new tasks. type: "string" example: "json-file" Options: description: | Driver-specific options for the selectd log driver, specified as key/value pairs. type: "object" additionalProperties: type: "string" example: "max-file": "10" "max-size": "100m" # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but # without `JoinTokens`. ClusterInfo: description: | ClusterInfo represents information about the swarm as is returned by the "/info" endpoint. Join-tokens are not included. x-nullable: true type: "object" properties: ID: description: "The ID of the swarm." type: "string" example: "abajmipo7b4xz5ip2nrla6b11" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the swarm was initialised in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the swarm was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/SwarmSpec" TLSInfo: $ref: "#/definitions/TLSInfo" RootRotationInProgress: description: | Whether there is currently a root CA rotation in progress for the swarm type: "boolean" example: false DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. If no port is set or is set to 0, the default port (4789) is used. type: "integer" format: "uint32" default: 4789 example: 4789 DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" format: "CIDR" example: ["10.10.0.0/16", "20.20.0.0/16"] SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. type: "integer" format: "uint32" maximum: 29 default: 24 example: 24 JoinTokens: description: | JoinTokens contains the tokens workers and managers need to join the swarm. type: "object" properties: Worker: description: | The token workers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" Manager: description: | The token managers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" Swarm: type: "object" allOf: - $ref: "#/definitions/ClusterInfo" - type: "object" properties: JoinTokens: $ref: "#/definitions/JoinTokens" TaskSpec: description: "User modifiable task configuration." type: "object" properties: PluginSpec: type: "object" description: | Plugin spec for the service. *(Experimental release only.)* <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. 
properties: Name: description: "The name or 'alias' to use for the plugin." type: "string" Remote: description: "The plugin image reference to use." type: "string" Disabled: description: "Disable the plugin once scheduled." type: "boolean" PluginPrivilege: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" ContainerSpec: type: "object" description: | Container spec for the service. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. properties: Image: description: "The image name to use for the container" type: "string" Labels: description: "User-defined key/value data." type: "object" additionalProperties: type: "string" Command: description: "The command to be run in the image." type: "array" items: type: "string" Args: description: "Arguments to the command." type: "array" items: type: "string" Hostname: description: | The hostname to use for the container, as a valid [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. type: "string" Env: description: | A list of environment variables in the form `VAR=value`. type: "array" items: type: "string" Dir: description: "The working directory for commands to run in." type: "string" User: description: "The user inside the container." type: "string" Groups: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" Privileges: type: "object" description: "Security options for the container" properties: CredentialSpec: type: "object" description: "CredentialSpec for managed service account (Windows only)" properties: Config: type: "string" example: "0bt9dmxjvjiqermk6xrop3ekq" description: | Load credential spec from a Swarm Config with the given ID. The specified config must also be present in the Configs field with the Runtime property set. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. File: type: "string" example: "spec.json" description: | Load credential spec from this file. The file is read by the daemon, and must be present in the `CredentialSpecs` subdirectory in the docker data directory, which defaults to `C:\ProgramData\Docker\` on Windows. For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. Registry: type: "string" description: | Load credential spec from this value in the Windows registry. The specified registry value must be located in: `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" properties: Disable: type: "boolean" description: "Disable SELinux" User: type: "string" description: "SELinux user label" Role: type: "string" description: "SELinux role label" Type: type: "string" description: "SELinux type label" Level: type: "string" description: "SELinux level label" TTY: description: "Whether a pseudo-TTY should be allocated." 
type: "boolean" OpenStdin: description: "Open `stdin`" type: "boolean" ReadOnly: description: "Mount the container's root filesystem as read only." type: "boolean" Mounts: description: | Specification for mounts to be added to containers created as part of the service. type: "array" items: $ref: "#/definitions/Mount" StopSignal: description: "Signal to stop the container." type: "string" StopGracePeriod: description: | Amount of time to wait for the container to terminate before forcefully killing it. type: "integer" format: "int64" HealthCheck: $ref: "#/definitions/HealthConfig" Hosts: type: "array" description: | A list of hostname/IP mappings to add to the container's `hosts` file. The format of extra hosts is specified in the [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) man page: IP_address canonical_hostname [aliases...] items: type: "string" DNSConfig: description: | Specification for DNS related configurations in resolver configuration file (`resolv.conf`). type: "object" properties: Nameservers: description: "The IP addresses of the name servers." type: "array" items: type: "string" Search: description: "A search list for host-name lookup." type: "array" items: type: "string" Options: description: | A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.). type: "array" items: type: "string" Secrets: description: | Secrets contains references to zero or more secrets that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" SecretID: description: | SecretID represents the ID of the specific secret that we're referencing. type: "string" SecretName: description: | SecretName is the name of the secret that this references, but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID. type: "string" Configs: description: | Configs contains references to zero or more configs that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" Runtime: description: | Runtime represents a target that is not mounted into the container but is used by the task <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually > exclusive type: "object" ConfigID: description: | ConfigID represents the ID of the specific config that we're referencing. type: "string" ConfigName: description: | ConfigName is the name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID. 
type: "string" Isolation: type: "string" description: | Isolation technology of the containers running the service. (Windows only) enum: - "default" - "process" - "hyperv" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true Sysctls: description: | Set kernel namedspaced parameters (sysctls) in the container. The Sysctls option on services accepts the same sysctls as the are supported on containers. Note that while the same sysctls are supported, no guarantees or checks are made about their suitability for a clustered environment, and it's up to the user to determine whether a given sysctl will work properly in a Service. type: "object" additionalProperties: type: "string" # This option is not used by Windows containers CapabilityAdd: type: "array" description: | A list of kernel capabilities to add to the default set for the container. items: type: "string" example: - "CAP_NET_RAW" - "CAP_SYS_ADMIN" - "CAP_SYS_CHROOT" - "CAP_SYSLOG" CapabilityDrop: type: "array" description: | A list of kernel capabilities to drop from the default set for the container. items: type: "string" example: - "CAP_NET_RAW" Ulimits: description: | A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay networks. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. type: "object" properties: ContainerID: description: "ID of the container represented by this task" type: "string" Resources: description: | Resource requirements which apply to each individual container created as part of the service. type: "object" properties: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" Reservation: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: description: | Specification for the restart policy which applies to containers created as part of this service. type: "object" properties: Condition: description: "Condition for restart." type: "string" enum: - "none" - "on-failure" - "any" Delay: description: "Delay between restart attempts." type: "integer" format: "int64" MaxAttempts: description: | Maximum attempts to restart a given container before giving up (default value is 0, which is ignored). type: "integer" format: "int64" default: 0 Window: description: | Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded). type: "integer" format: "int64" default: 0 Placement: type: "object" properties: Constraints: description: | An array of constraint expressions to limit the set of nodes where a task can be scheduled. Constraint expressions can either use a _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find nodes that satisfy every expression (AND match). 
Constraints can match node or Docker Engine labels as follows: node attribute | matches | example ---------------------|--------------------------------|----------------------------------------------- `node.id` | Node ID | `node.id==2ivku8v2gvtg4` `node.hostname` | Node hostname | `node.hostname!=node-2` `node.role` | Node role (`manager`/`worker`) | `node.role==manager` `node.platform.os` | Node operating system | `node.platform.os==windows` `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` `node.labels` | User-defined node labels | `node.labels.security==high` `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` `engine.labels` apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add `node.labels` for operational purposes by using the [`node update endpoint`](#operation/NodeUpdate). type: "array" items: type: "string" example: - "node.hostname!=node3.corp.example.com" - "node.role!=manager" - "node.labels.type==production" - "node.platform.os==linux" - "node.platform.arch==x86_64" Preferences: description: | Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence. type: "array" items: type: "object" properties: Spread: type: "object" properties: SpreadDescriptor: description: | label descriptor, such as `engine.labels.az`. type: "string" example: - Spread: SpreadDescriptor: "node.labels.datacenter" - Spread: SpreadDescriptor: "node.labels.rack" MaxReplicas: description: | Maximum number of replicas for per node (default value is 0, which is unlimited) type: "integer" format: "int64" default: 0 Platforms: description: | Platforms stores all the platforms that the service's image can run on. This field is used in the platform filter for scheduling. If empty, then the platform filter is off, meaning there are no scheduling restrictions. type: "array" items: $ref: "#/definitions/Platform" ForceUpdate: description: | A counter that triggers an update even if no relevant parameters have been changed. type: "integer" Runtime: description: | Runtime is the type of runtime specified for the task executor. type: "string" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" LogDriver: description: | Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified. type: "object" properties: Name: type: "string" Options: type: "object" additionalProperties: type: "string" TaskState: type: "string" enum: - "new" - "allocated" - "pending" - "assigned" - "accepted" - "preparing" - "ready" - "starting" - "running" - "complete" - "shutdown" - "failed" - "rejected" - "remove" - "orphaned" Task: type: "object" properties: ID: description: "The ID of the task." type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Name: description: "Name of the task." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Spec: $ref: "#/definitions/TaskSpec" ServiceID: description: "The ID of the service this task is part of." type: "string" Slot: type: "integer" NodeID: description: "The ID of the node that this task is on." 
type: "string" AssignedGenericResources: $ref: "#/definitions/GenericResources" Status: type: "object" properties: Timestamp: type: "string" format: "dateTime" State: $ref: "#/definitions/TaskState" Message: type: "string" Err: type: "string" ContainerStatus: type: "object" properties: ContainerID: type: "string" PID: type: "integer" ExitCode: type: "integer" DesiredState: $ref: "#/definitions/TaskState" JobIteration: description: | If the Service this Task belongs to is a job-mode service, contains the JobIteration of the Service this Task was created for. Absent if the Task was created for a Replicated or Global Service. $ref: "#/definitions/ObjectVersion" example: ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" AssignedGenericResources: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" ServiceSpec: description: "User modifiable configuration for a service." properties: Name: description: "Name of the service." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" TaskTemplate: $ref: "#/definitions/TaskSpec" Mode: description: "Scheduling mode for the service." type: "object" properties: Replicated: type: "object" properties: Replicas: type: "integer" format: "int64" Global: type: "object" ReplicatedJob: description: | The mode used for services with a finite number of tasks that run to a completed state. type: "object" properties: MaxConcurrent: description: | The maximum number of replicas to run simultaneously. type: "integer" format: "int64" default: 1 TotalCompletions: description: | The total number of replicas desired to reach the Completed state. If unset, will default to the value of `MaxConcurrent` type: "integer" format: "int64" GlobalJob: description: | The mode used for services which run a task to the completed state on each valid node. type: "object" UpdateConfig: description: "Specification for the update strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: "Amount of time between updates, in nanoseconds." type: "integer" format: "int64" FailureAction: description: | Action to take if an updated task fails to run, or stops running during the update. 
type: "string" enum: - "continue" - "pause" - "rollback" Monitor: description: | Amount of time to monitor each updated task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" RollbackConfig: description: "Specification for the rollback strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: | Amount of time between rollback iterations, in nanoseconds. type: "integer" format: "int64" FailureAction: description: | Action to take if an rolled back task fails to run, or stops running during the rollback. type: "string" enum: - "continue" - "pause" Monitor: description: | Amount of time to monitor each rolled back task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" EndpointSpec: $ref: "#/definitions/EndpointSpec" EndpointPortConfig: type: "object" properties: Name: type: "string" Protocol: type: "string" enum: - "tcp" - "udp" - "sctp" TargetPort: description: "The port inside the container." type: "integer" PublishedPort: description: "The port on the swarm hosts." type: "integer" PublishMode: description: | The mode in which port is published. <p><br /></p> - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publish the port directly on the swarm node where that service is running. type: "string" enum: - "ingress" - "host" default: "ingress" example: "ingress" EndpointSpec: description: "Properties that can be configured to access and load balance a service." type: "object" properties: Mode: description: | The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" - "dnsrr" default: "vip" Ports: description: | List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used. 
type: "array" items: $ref: "#/definitions/EndpointPortConfig" Service: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ServiceSpec" Endpoint: type: "object" properties: Spec: $ref: "#/definitions/EndpointSpec" Ports: type: "array" items: $ref: "#/definitions/EndpointPortConfig" VirtualIPs: type: "array" items: type: "object" properties: NetworkID: type: "string" Addr: type: "string" UpdateStatus: description: "The status of a service update." type: "object" properties: State: type: "string" enum: - "updating" - "paused" - "completed" StartedAt: type: "string" format: "dateTime" CompletedAt: type: "string" format: "dateTime" Message: type: "string" ServiceStatus: description: | The status of the service's tasks. Provided only when requested as part of a ServiceList operation. type: "object" properties: RunningTasks: description: | The number of tasks for the service currently in the Running state. type: "integer" format: "uint64" example: 7 DesiredTasks: description: | The number of tasks for the service desired to be running. For replicated services, this is the replica count from the service spec. For global services, this is computed by taking count of all tasks for the service with a Desired State other than Shutdown. type: "integer" format: "uint64" example: 10 CompletedTasks: description: | The number of tasks for a job that are in the Completed state. This field must be cross-referenced with the service type, as the value of 0 may mean the service is not in a job mode, or it may mean the job-mode service has no tasks yet Completed. type: "integer" format: "uint64" JobStatus: description: | The status of the service when it is in one of ReplicatedJob or GlobalJob modes. Absent on Replicated and Global mode services. The JobIteration is an ObjectVersion, but unlike the Service's version, does not need to be sent with an update request. type: "object" properties: JobIteration: description: | JobIteration is a value increased each time a Job is executed, successfully or otherwise. "Executed", in this case, means the job as a whole has been started, not that an individual Task has been launched. A job is "Executed" when its ServiceSpec is updated. JobIteration can be used to disambiguate Tasks belonging to different executions of a job. Though JobIteration will increase with each subsequent execution, it may not necessarily increase by 1, and so JobIteration should not be used to $ref: "#/definitions/ObjectVersion" LastExecution: description: | The last time, as observed by the server, that this job was started. 
type: "string" format: "dateTime" example: ID: "9mnpnzenvg8p8tdbtq4wvbkcz" Version: Index: 19 CreatedAt: "2016-06-07T21:05:51.880065305Z" UpdatedAt: "2016-06-07T21:07:29.962229872Z" Spec: Name: "hopeful_cori" TaskTemplate: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Endpoint: Spec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 VirtualIPs: - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.2/16" - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.3/16" ImageDeleteResponseItem: type: "object" properties: Untagged: description: "The image ID of an image that was untagged" type: "string" Deleted: description: "The image ID of an image that was deleted" type: "string" ServiceUpdateResponse: type: "object" properties: Warnings: description: "Optional warning messages" type: "array" items: type: "string" example: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: type: "object" properties: Id: description: "The ID of this container" type: "string" x-go-name: "ID" Names: description: "The names that this container has been given" type: "array" items: type: "string" Image: description: "The name of the image used when creating this container" type: "string" ImageID: description: "The ID of the image that this container was created from" type: "string" Command: description: "Command to run when starting the container" type: "string" Created: description: "When the container was created" type: "integer" format: "int64" Ports: description: "The ports exposed by this container" type: "array" items: $ref: "#/definitions/Port" SizeRw: description: "The size of files that have been created or changed by this container" type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container" type: "integer" format: "int64" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" State: description: "The state of this container (e.g. `Exited`)" type: "string" Status: description: "Additional human-readable status of this container (e.g. `Exit 0`)" type: "string" HostConfig: type: "object" properties: NetworkMode: type: "string" NetworkSettings: description: "A summary of the container's network settings" type: "object" properties: Networks: type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Mounts: type: "array" items: $ref: "#/definitions/Mount" Driver: description: "Driver represents a driver (network, logging, secrets)." type: "object" required: [Name] properties: Name: description: "Name of the driver." type: "string" x-nullable: false example: "some-driver" Options: description: "Key/value map of driver-specific options." type: "object" x-nullable: false additionalProperties: type: "string" example: OptionA: "value for driver-specific option A" OptionB: "value for driver-specific option B" SecretSpec: type: "object" properties: Name: description: "User-defined name of the secret." 
type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) data to store as secret. This field is only used to _create_ a secret, and is not returned by other endpoints. type: "string" example: "" Driver: description: | Name of the secrets driver used to fetch the secret's value from an external secret store. $ref: "#/definitions/Driver" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Secret: type: "object" properties: ID: type: "string" example: "blt1owaxmitz71s9v5zh81zun" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" UpdatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" Spec: $ref: "#/definitions/SecretSpec" ConfigSpec: type: "object" properties: Name: description: "User-defined name of the config." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) config data. type: "string" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Config: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ConfigSpec" ContainerState: description: | ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" properties: Status: description: | String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead". type: "string" enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] example: "running" Running: description: | Whether this container is running. Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. Use the `Status` field instead to determine if a container's state is "running". type: "boolean" example: true Paused: description: "Whether this container is paused." type: "boolean" example: false Restarting: description: "Whether this container is restarting." type: "boolean" example: false OOMKilled: description: | Whether this container has been killed because it ran out of memory. type: "boolean" example: false Dead: type: "boolean" example: false Pid: description: "The process ID of this container" type: "integer" example: 1234 ExitCode: description: "The last exit code of this container" type: "integer" example: 0 Error: type: "string" StartedAt: description: "The time when this container was last started." 
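# Illustrative sketch (not part of the schema): as described above, a paused
# container reports both Running and Paused as true. Values are assumed for
# illustration only.
#
#   { "Status": "paused", "Running": true, "Paused": true, "ExitCode": 0 }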
type: "string" example: "2020-01-06T09:06:59.461876391Z" FinishedAt: description: "The time when this container last exited." type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: x-nullable: true $ref: "#/definitions/Health" SystemVersion: type: "object" description: | Response of Engine API: GET "/version" properties: Platform: type: "object" required: [Name] properties: Name: type: "string" Components: type: "array" description: | Information about system components items: type: "object" x-go-name: ComponentVersion required: [Name, Version] properties: Name: description: | Name of the component type: "string" example: "Engine" Version: description: | Version of the component type: "string" x-nullable: false example: "19.03.12" Details: description: | Key/value pairs of strings with additional information about the component. These values are intended for informational purposes only, and their content is not defined, and not part of the API specification. These messages can be printed by the client as information to the user. type: "object" x-nullable: true Version: description: "The version of the daemon" type: "string" example: "19.03.12" ApiVersion: description: | The default (and highest) API version that is supported by the daemon type: "string" example: "1.40" MinAPIVersion: description: | The minimum API version that is supported by the daemon type: "string" example: "1.12" GitCommit: description: | The Git commit of the source code that was used to build the daemon type: "string" example: "48a66213fe" GoVersion: description: | The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" example: "go1.13.14" Os: description: | The operating system that the daemon is running on ("linux" or "windows") type: "string" example: "linux" Arch: description: | The architecture that the daemon is running on type: "string" example: "amd64" KernelVersion: description: | The kernel version (`uname -r`) that the daemon is running on. This field is omitted when empty. type: "string" example: "4.19.76-linuxkit" Experimental: description: | Indicates if the daemon is started with experimental features enabled. This field is omitted when empty / false. type: "boolean" example: true BuildTime: description: | The date and time that the daemon was compiled. type: "string" example: "2020-06-22T15:49:27.000000000+00:00" SystemInfo: type: "object" properties: ID: description: | Unique identifier of the daemon. <p><br /></p> > **Note**: The format of the ID itself is not part of the API, and > should not be considered stable. type: "string" example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" Containers: description: "Total number of containers on the host." type: "integer" example: 14 ContainersRunning: description: | Number of containers with status `"running"`. type: "integer" example: 3 ContainersPaused: description: | Number of containers with status `"paused"`. type: "integer" example: 1 ContainersStopped: description: | Number of containers with status `"stopped"`. type: "integer" example: 10 Images: description: | Total number of images on the host. Both _tagged_ and _untagged_ (dangling) images are counted. type: "integer" example: 508 Driver: description: "Name of the storage driver in use." type: "string" example: "overlay2" DriverStatus: description: | Information specific to the storage driver, provided as "label" / "value" pairs. 
This information is provided by the storage driver, and formatted in a way consistent with the output of `docker info` on the command line. <p><br /></p> > **Note**: The information returned in this field, including the > formatting of values and labels, should not be considered stable, > and may change without notice. type: "array" items: type: "array" items: type: "string" example: - ["Backing Filesystem", "extfs"] - ["Supports d_type", "true"] - ["Native Overlay Diff", "true"] DockerRootDir: description: | Root directory of persistent Docker state. Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` on Windows. type: "string" example: "/var/lib/docker" Plugins: $ref: "#/definitions/PluginsInfo" MemoryLimit: description: "Indicates if the host has memory limit support enabled." type: "boolean" example: true SwapLimit: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true KernelMemory: description: | Indicates if the host has kernel memory limit support enabled. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "boolean" example: true CpuCfsPeriod: description: | Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host. type: "boolean" example: true CpuCfsQuota: description: | Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host. type: "boolean" example: true CPUShares: description: | Indicates if CPU Shares limiting is supported by the host. type: "boolean" example: true CPUSet: description: | Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) type: "boolean" example: true PidsLimit: description: "Indicates if the host kernel has PID limit support enabled." type: "boolean" example: true OomKillDisable: description: "Indicates if OOM killer disable is supported on the host." type: "boolean" IPv4Forwarding: description: "Indicates IPv4 forwarding is enabled." type: "boolean" example: true BridgeNfIptables: description: "Indicates if `bridge-nf-call-iptables` is available on the host." type: "boolean" example: true BridgeNfIp6tables: description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." type: "boolean" example: true Debug: description: | Indicates if the daemon is running in debug-mode / with debug-level logging enabled. type: "boolean" example: true NFd: description: | The total number of file Descriptors in use by the daemon process. This information is only returned if debug-mode is enabled. type: "integer" example: 64 NGoroutines: description: | The number of goroutines that currently exist. This information is only returned if debug-mode is enabled. type: "integer" example: 174 SystemTime: description: | Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" example: "2017-08-08T20:28:29.06202363Z" LoggingDriver: description: | The logging driver to use as a default for new containers. type: "string" CgroupDriver: description: | The driver to use for managing cgroups. type: "string" enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" CgroupVersion: description: | The version of the cgroup. type: "string" enum: ["1", "2"] default: "1" example: "1" NEventsListener: description: "Number of event listeners subscribed." 
type: "integer" example: 30 KernelVersion: description: | Kernel version of the host. On Linux, this information obtained from `uname`. On Windows this information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. type: "string" example: "4.9.38-moby" OperatingSystem: description: | Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" or "Windows Server 2016 Datacenter" type: "string" example: "Alpine Linux v3.5" OSVersion: description: | Version of the host's operating system <p><br /></p> > **Note**: The information returned in this field, including its > very existence, and the formatting of values, should not be considered > stable, and may change without notice. type: "string" example: "16.04" OSType: description: | Generic type of the operating system of the host, as returned by the Go runtime (`GOOS`). Currently returned values are "linux" and "windows". A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "linux" Architecture: description: | Hardware architecture of the host, as returned by the Go runtime (`GOARCH`). A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "x86_64" NCPU: description: | The number of logical CPUs usable by the daemon. The number of available CPUs is checked by querying the operating system when the daemon starts. Changes to operating system CPU allocation after the daemon is started are not reflected. type: "integer" example: 4 MemTotal: description: | Total amount of physical memory available on the host, in bytes. type: "integer" format: "int64" example: 2095882240 IndexServerAddress: description: | Address / URL of the index server that is used for image search, and as a default for user authentication for Docker Hub and Docker Cloud. default: "https://index.docker.io/v1/" type: "string" example: "https://index.docker.io/v1/" RegistryConfig: $ref: "#/definitions/RegistryServiceConfig" GenericResources: $ref: "#/definitions/GenericResources" HttpProxy: description: | HTTP-proxy configured for the daemon. This value is obtained from the [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "http://xxxxx:[email protected]:8080" HttpsProxy: description: | HTTPS-proxy configured for the daemon. This value is obtained from the [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "https://xxxxx:[email protected]:4443" NoProxy: description: | Comma-separated list of domain extensions for which no proxy should be used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Containers do not automatically inherit this configuration. 
type: "string" example: "*.local, 169.254/16" Name: description: "Hostname of the host." type: "string" example: "node5.corp.example.com" Labels: description: | User-defined labels (key/value metadata) as set on the daemon. <p><br /></p> > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, > set through the daemon configuration, and _node_ labels, set from a > manager node in the Swarm. Node labels are not included in this > field. Node labels can be retrieved using the `/nodes/(id)` endpoint > on a manager node in the Swarm. type: "array" items: type: "string" example: ["storage=ssd", "production"] ExperimentalBuild: description: | Indicates if experimental features are enabled on the daemon. type: "boolean" example: true ServerVersion: description: | Version string of the daemon. > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) > returns the Swarm version instead of the daemon version, for example > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: description: | URL of the distributed storage backend. The storage backend is used for multihost networking (to store network and endpoint information) and by the node discovery mechanism. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "consul://consul.corp.example.com:8600/some/path" ClusterAdvertise: description: | The network endpoint that the Engine advertises for the purpose of node discovery. ClusterAdvertise is a `host:port` combination on which the daemon is reachable by other hosts. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "node5.corp.example.com:8000" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) runtimes configured on the daemon. Keys hold the "name" used to reference the runtime. The Docker daemon relies on an OCI compliant runtime (invoked via the `containerd` daemon) as its interface to the Linux kernel namespaces, cgroups, and SELinux. The default runtime is `runc`, and automatically configured. Additional runtimes can be configured by the user and will be listed here. type: "object" additionalProperties: $ref: "#/definitions/Runtime" default: runc: path: "runc" example: runc: path: "runc" runc-master: path: "/go/bin/runc" custom: path: "/usr/local/bin/my-oci-runtime" runtimeArgs: ["--debug", "--systemd-cgroup=false"] DefaultRuntime: description: | Name of the default OCI runtime that is used when starting containers. The default can be overridden per-container at create time. type: "string" default: "runc" example: "runc" Swarm: $ref: "#/definitions/SwarmInfo" LiveRestoreEnabled: description: | Indicates if live restore is enabled. If enabled, containers are kept running when the daemon is shutdown or upon daemon start if running containers are detected. type: "boolean" default: false example: false Isolation: description: | Represents the isolation technology to use as a default for containers. The supported values are platform-specific. 
If no isolation value is specified on daemon start, on Windows client, the default is `hyperv`, and on Windows server, the default is `process`. This option is currently not used on other platforms. default: "default" type: "string" enum: - "default" - "hyperv" - "process" InitBinary: description: | Name and, optional, path of the `docker-init` binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "docker-init" ContainerdCommit: $ref: "#/definitions/Commit" RuncCommit: $ref: "#/definitions/Commit" InitCommit: $ref: "#/definitions/Commit" SecurityOptions: description: | List of security features that are enabled on the daemon, such as apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value pairs. type: "array" items: type: "string" example: - "name=apparmor" - "name=seccomp,profile=default" - "name=selinux" - "name=userns" - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. If a commercial license has been applied to the daemon, information such as number of nodes, and expiration are included. type: "string" example: "Community Engine" DefaultAddressPools: description: | List of custom default address pools for local networks, which can be specified in the daemon.json file or dockerd option. Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 10.10.[0-255].0/24 address pools. type: "array" items: type: "object" properties: Base: description: "The network address in CIDR format" type: "string" example: "10.10.0.0/16" Size: description: "The network pool size" type: "integer" example: "24" Warnings: description: | List of warnings / informational messages about missing features, or issues related to the daemon configuration. These messages can be printed by the client as information to the user. type: "array" items: type: "string" example: - "WARNING: No memory limit support" - "WARNING: bridge-nf-call-iptables is disabled" - "WARNING: bridge-nf-call-ip6tables is disabled" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: description: | Available plugins per type. <p><br /></p> > **Note**: Only unmanaged (V1) plugins are included in this list. > V1 plugins are "lazily" loaded, and are not returned in this list > if there is no resource using the plugin. type: "object" properties: Volume: description: "Names of available volume-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["local"] Network: description: "Names of available network-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] Authorization: description: "Names of available authorization plugins." type: "array" items: type: "string" example: ["img-authz-plugin", "hbm"] Log: description: "Names of available logging-drivers, and logging-driver plugins." type: "array" items: type: "string" example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] RegistryServiceConfig: description: | RegistryServiceConfig stores daemon registry services configuration. 
    type: "object"
    x-nullable: true
    properties:
      AllowNondistributableArtifactsCIDRs:
        description: |
          List of IP ranges to which nondistributable artifacts can be pushed,
          using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632).

          Some images (for example, Windows base images) contain artifacts
          whose distribution is restricted by license. When these images are
          pushed to a registry, restricted artifacts are not included.

          This configuration overrides this behavior, and enables the daemon
          to push nondistributable artifacts to all registries whose resolved
          IP address is within the subnet described by the CIDR syntax.

          This option is useful when pushing images containing
          nondistributable artifacts to a registry on an air-gapped network so
          hosts on that network can pull the images without connecting to
          another server.

          > **Warning**: Nondistributable artifacts typically have restrictions
          > on how and where they can be distributed and shared. Only use this
          > feature to push artifacts to private registries and ensure that you
          > are in compliance with any terms that cover redistributing
          > nondistributable artifacts.
        type: "array"
        items:
          type: "string"
        example: ["::1/128", "127.0.0.0/8"]
      AllowNondistributableArtifactsHostnames:
        description: |
          List of registry hostnames to which nondistributable artifacts can be
          pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.

          Some images (for example, Windows base images) contain artifacts
          whose distribution is restricted by license. When these images are
          pushed to a registry, restricted artifacts are not included.

          This configuration overrides this behavior for the specified
          registries.

          This option is useful when pushing images containing
          nondistributable artifacts to a registry on an air-gapped network so
          hosts on that network can pull the images without connecting to
          another server.

          > **Warning**: Nondistributable artifacts typically have restrictions
          > on how and where they can be distributed and shared. Only use this
          > feature to push artifacts to private registries and ensure that you
          > are in compliance with any terms that cover redistributing
          > nondistributable artifacts.
        type: "array"
        items:
          type: "string"
        example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"]
      InsecureRegistryCIDRs:
        description: |
          List of IP ranges of insecure registries, using the CIDR syntax
          ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
          accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
          from unknown CAs) communication.

          By default, local registries (`127.0.0.0/8`) are configured as
          insecure. All other registries are secure. Communicating with an
          insecure registry is not possible if the daemon assumes that registry
          is secure.

          This configuration overrides this behavior, and enables insecure
          communication with registries whose resolved IP address is within
          the subnet described by the CIDR syntax.

          Registries can also be marked insecure by hostname. Those registries
          are listed under `IndexConfigs` and have their `Secure` field set to
          `false`.

          > **Warning**: Using this option can be useful when running a local
          > registry, but introduces security vulnerabilities. This option
          > should therefore ONLY be used for testing purposes. For increased
          > security, users should add their CA to their system's list of trusted
          > CAs instead of enabling this option.
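# Illustrative sketch (not part of the schema): these values are typically
# derived from the daemon configuration, e.g. a daemon.json fragment along
# these lines. The registry addresses are assumptions for illustration only.
#
#   {
#     "insecure-registries": ["registry.internal.corp.example.com:3000"],
#     "registry-mirrors": ["https://hub-mirror.corp.example.com:5000/"]
#   }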
type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] IndexConfigs: type: "object" additionalProperties: $ref: "#/definitions/IndexInfo" example: "127.0.0.1:5000": "Name": "127.0.0.1:5000" "Mirrors": [] "Secure": false "Official": false "[2001:db8:a0b:12f0::1]:80": "Name": "[2001:db8:a0b:12f0::1]:80" "Mirrors": [] "Secure": false "Official": false "docker.io": Name: "docker.io" Mirrors: ["https://hub-mirror.corp.example.com:5000/"] Secure: true Official: true "registry.internal.corp.example.com:3000": Name: "registry.internal.corp.example.com:3000" Mirrors: [] Secure: false Official: false Mirrors: description: | List of registry URLs that act as a mirror for the official (`docker.io`) registry. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://[2001:db8:a0b:12f0::1]/" IndexInfo: description: IndexInfo contains information about a registry. type: "object" x-nullable: true properties: Name: description: | Name of the registry, such as "docker.io". type: "string" example: "docker.io" Mirrors: description: | List of mirrors, expressed as URIs. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://registry-2.docker.io/" - "https://registry-3.docker.io/" Secure: description: | Indicates if the registry is part of the list of insecure registries. If `false`, the registry is insecure. Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. > **Warning**: Insecure registries can be useful when running a local > registry. However, because its use creates security vulnerabilities > it should ONLY be enabled for testing purposes. For increased > security, users should add their CA to their system's list of > trusted CAs instead of enabling this option. type: "boolean" example: true Official: description: | Indicates whether this is an official registry (i.e., Docker Hub / docker.io) type: "boolean" example: true Runtime: description: | Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) runtime. The runtime is invoked by the daemon via the `containerd` daemon. OCI runtimes act as an interface to the Linux kernel namespaces, cgroups, and SELinux. type: "object" properties: path: description: | Name and, optional, path, of the OCI executable binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "/usr/local/bin/my-oci-runtime" runtimeArgs: description: | List of command-line arguments to pass to the runtime when invoked. type: "array" x-nullable: true items: type: "string" example: ["--debug", "--systemd-cgroup=false"] Commit: description: | Commit holds the Git-commit (SHA1) that a binary was built from, as reported in the version-string of external tools, such as `containerd`, or `runC`. type: "object" properties: ID: description: "Actual commit ID of external tool." type: "string" example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" Expected: description: | Commit ID of external tool expected by dockerd as set at build time. type: "string" example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" SwarmInfo: description: | Represents generic information about swarm. type: "object" properties: NodeID: description: "Unique identifier of for this node in the swarm." 
        type: "string"
        default: ""
        example: "k67qz4598weg5unwwffg6z1m1"
      NodeAddr:
        description: |
          IP address at which this node can be reached by other nodes in the
          swarm.
        type: "string"
        default: ""
        example: "10.0.0.46"
      LocalNodeState:
        $ref: "#/definitions/LocalNodeState"
      ControlAvailable:
        type: "boolean"
        default: false
        example: true
      Error:
        type: "string"
        default: ""
      RemoteManagers:
        description: |
          List of IDs and addresses of other managers in the swarm.
        type: "array"
        default: null
        x-nullable: true
        items:
          $ref: "#/definitions/PeerNode"
        example:
          - NodeID: "71izy0goik036k48jg985xnds"
            Addr: "10.0.0.158:2377"
          - NodeID: "79y6h1o4gv8n120drcprv5nmc"
            Addr: "10.0.0.159:2377"
          - NodeID: "k67qz4598weg5unwwffg6z1m1"
            Addr: "10.0.0.46:2377"
      Nodes:
        description: "Total number of nodes in the swarm."
        type: "integer"
        x-nullable: true
        example: 4
      Managers:
        description: "Total number of managers in the swarm."
        type: "integer"
        x-nullable: true
        example: 3
      Cluster:
        $ref: "#/definitions/ClusterInfo"
  LocalNodeState:
    description: "Current local status of this node."
    type: "string"
    default: ""
    enum:
      - ""
      - "inactive"
      - "pending"
      - "active"
      - "error"
      - "locked"
    example: "active"
  PeerNode:
    description: "Represents a peer-node in the swarm"
    properties:
      NodeID:
        description: "Unique identifier for this node in the swarm."
        type: "string"
      Addr:
        description: |
          IP address and ports at which this node can be reached.
        type: "string"
  NetworkAttachmentConfig:
    description: |
      Specifies how a service should be attached to a particular network.
    type: "object"
    properties:
      Target:
        description: |
          The target network for attachment. Must be a network name or ID.
        type: "string"
      Aliases:
        description: |
          Discoverable alternate names for the service on this network.
        type: "array"
        items:
          type: "string"
      DriverOpts:
        description: |
          Driver attachment options for the network target.
        type: "object"
        additionalProperties:
          type: "string"

paths:
  /containers/json:
    get:
      summary: "List containers"
      description: |
        Returns a list of containers. For details on the format, see the
        [inspect endpoint](#operation/ContainerInspect).

        Note that it uses a different, smaller representation of a container
        than inspecting a single container. For example, the list of linked
        containers is not propagated.
      operationId: "ContainerList"
      produces:
        - "application/json"
      parameters:
        - name: "all"
          in: "query"
          description: |
            Return all containers. By default, only running containers are shown.
          type: "boolean"
          default: false
        - name: "limit"
          in: "query"
          description: |
            Return this number of most recently created containers, including
            non-running ones.
          type: "integer"
        - name: "size"
          in: "query"
          description: |
            Return the size of container as fields `SizeRw` and `SizeRootFs`.
          type: "boolean"
          default: false
        - name: "filters"
          in: "query"
          description: |
            Filters to process on the container list, encoded as JSON (a
            `map[string][]string`). For example, `{"status": ["paused"]}` will
            only return paused containers.
Available filters: - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) - `before`=(`<container id>` or `<container name>`) - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `exited=<int>` containers with exit code of `<int>` - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - `id=<ID>` a container's ID - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - `is-task=`(`true`|`false`) - `label=key` or `label="key=value"` of a container label - `name=<name>` a container's name - `network`=(`<network id>` or `<network name>`) - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `since`=(`<container id>` or `<container name>`) - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - `volume`=(`<volume name>` or `<mount point destination>`) type: "string" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/ContainerSummary" examples: application/json: - Id: "8dfafdbc3a40" Names: - "/boring_feynman" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 1" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: - PrivatePort: 2222 PublicPort: 3333 Type: "tcp" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:02" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" - Id: "9cd87474be90" Names: - "/coolName" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 222222" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" Gateway: "172.17.0.1" IPAddress: "172.17.0.8" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:08" Mounts: [] - Id: "3176a2479c92" Names: - "/sleepy_dog" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 3333333333333333" Created: 1367854154 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" Gateway: "172.17.0.1" IPAddress: "172.17.0.6" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:06" Mounts: [] - Id: "4cb07b47f9fb" Names: - "/running_cat" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 444444444444444444444444444444444" Created: 1367854152 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" 
NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" Gateway: "172.17.0.1" IPAddress: "172.17.0.5" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:05" Mounts: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/create: post: summary: "Create a container" operationId: "ContainerCreate" consumes: - "application/json" - "application/octet-stream" produces: - "application/json" parameters: - name: "name" in: "query" description: | Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. type: "string" pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" schema: allOf: - $ref: "#/definitions/ContainerConfig" - type: "object" properties: HostConfig: $ref: "#/definitions/HostConfig" NetworkingConfig: $ref: "#/definitions/NetworkingConfig" example: Hostname: "" Domainname: "" User: "" AttachStdin: false AttachStdout: true AttachStderr: true Tty: false OpenStdin: false StdinOnce: false Env: - "FOO=bar" - "BAZ=quux" Cmd: - "date" Entrypoint: "" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" Volumes: /volumes/data: {} WorkingDir: "" NetworkDisabled: false MacAddress: "12:34:56:78:9a:bc" ExposedPorts: 22/tcp: {} StopSignal: "SIGTERM" StopTimeout: 10 HostConfig: Binds: - "/tmp:/tmp" Links: - "redis3:redis" Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpuQuota: 50000 CpusetCpus: "0,1" CpusetMems: "0,1" MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 300 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceWriteIOps: - {} DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" MemorySwappiness: 60 OomKillDisable: false OomScoreAdj: 500 PidMode: "" PidsLimit: 0 PortBindings: 22/tcp: - HostPort: "11022" PublishAllPorts: false Privileged: false ReadonlyRootfs: false Dns: - "8.8.8.8" DnsOptions: - "" DnsSearch: - "" VolumesFrom: - "parent" - "other:ro" CapAdd: - "NET_ADMIN" CapDrop: - "MKNOD" GroupAdd: - "newgroup" RestartPolicy: Name: "" MaximumRetryCount: 0 AutoRemove: true NetworkMode: "bridge" Devices: [] Ulimits: - {} LogConfig: Type: "json-file" Config: {} SecurityOpt: [] StorageOpt: {} CgroupParent: "" VolumeDriver: "" ShmSize: 67108864 NetworkingConfig: EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" required: true responses: 201: description: "Container created successfully" schema: type: "object" title: "ContainerCreateResponse" description: "OK response to ContainerCreate operation" required: [Id, Warnings] properties: Id: description: "The ID of the created container" type: "string" x-nullable: false Warnings: description: "Warnings encountered when creating the container" type: "array" x-nullable: false items: type: "string" 
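# Illustrative sketch (not part of the schema): creating a container over the
# local UNIX socket with curl. The API version prefix below is an assumption;
# substitute the version reported by your daemon.
#
#   curl --unix-socket /var/run/docker.sock \
#     -H "Content-Type: application/json" \
#     -d '{"Image": "ubuntu", "Cmd": ["date"]}' \
#     -X POST "http://localhost/v1.41/containers/create?name=example"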
examples: application/json: Id: "e90e34656806" Warnings: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/{id}/json: get: summary: "Inspect a container" description: "Return low-level information about a container." operationId: "ContainerInspect" produces: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "ContainerInspectResponse" properties: Id: description: "The ID of the container" type: "string" Created: description: "The time the container was created" type: "string" Path: description: "The path to the command being run" type: "string" Args: description: "The arguments to the command being run" type: "array" items: type: "string" State: x-nullable: true $ref: "#/definitions/ContainerState" Image: description: "The container's image ID" type: "string" ResolvConfPath: type: "string" HostnamePath: type: "string" HostsPath: type: "string" LogPath: type: "string" Name: type: "string" RestartCount: type: "integer" Driver: type: "string" Platform: type: "string" MountLabel: type: "string" ProcessLabel: type: "string" AppArmorProfile: type: "string" ExecIDs: description: "IDs of exec instances that are running in the container." type: "array" items: type: "string" x-nullable: true HostConfig: $ref: "#/definitions/HostConfig" GraphDriver: $ref: "#/definitions/GraphDriverData" SizeRw: description: | The size of files that have been created or changed by this container. type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container." 
type: "integer" format: "int64" Mounts: type: "array" items: $ref: "#/definitions/MountPoint" Config: $ref: "#/definitions/ContainerConfig" NetworkSettings: $ref: "#/definitions/NetworkSettings" examples: application/json: AppArmorProfile: "" Args: - "-c" - "exit 9" Config: AttachStderr: true AttachStdin: false AttachStdout: true Cmd: - "/bin/sh" - "-c" - "exit 9" Domainname: "" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Healthcheck: Test: ["CMD-SHELL", "exit 0"] Hostname: "ba033ac44011" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" MacAddress: "" NetworkDisabled: false OpenStdin: false StdinOnce: false Tty: false User: "" Volumes: /volumes/data: {} WorkingDir: "" StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" Driver: "devicemapper" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 0 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteIOps: - {} ContainerIDFile: "" CpusetCpus: "" CpusetMems: "" CpuPercent: 80 CpuShares: 0 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 Devices: [] DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" IpcMode: "" LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" PidMode: "" PortBindings: {} Privileged: false ReadonlyRootfs: false PublishAllPorts: false RestartPolicy: MaximumRetryCount: 2 Name: "on-failure" LogConfig: Type: "json-file" Sysctls: net.ipv4.ip_forward: "1" Ulimits: - {} VolumeDriver: "" ShmSize: 67108864 HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" MountLabel: "" Name: "/boring_euclid" NetworkSettings: Bridge: "" SandboxID: "" HairpinMode: false LinkLocalIPv6Address: "" LinkLocalIPv6PrefixLen: 0 SandboxKey: "" EndpointID: "" Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 IPAddress: "" IPPrefixLen: 0 IPv6Gateway: "" MacAddress: "" Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Path: "/bin/sh" ProcessLabel: "" ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" RestartCount: 1 State: Error: "" ExitCode: 9 FinishedAt: "2015-01-06T15:47:32.080254511Z" Health: Status: "healthy" FailingStreak: 0 Log: - Start: "2019-12-22T10:59:05.6385933Z" End: "2019-12-22T10:59:05.8078452Z" ExitCode: 0 Output: "" OOMKilled: 
false Dead: false Paused: false Pid: 0 Restarting: false Running: true StartedAt: "2015-01-06T15:47:32.072697474Z" Status: "running" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "size" in: "query" type: "boolean" default: false description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" tags: ["Container"] /containers/{id}/top: get: summary: "List processes running inside a container" description: | On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows. operationId: "ContainerTop" responses: 200: description: "no error" schema: type: "object" title: "ContainerTopResponse" description: "OK response to ContainerTop operation" properties: Titles: description: "The ps column titles" type: "array" items: type: "string" Processes: description: | Each process running in the container, where each is process is an array of values corresponding to the titles. type: "array" items: type: "array" items: type: "string" examples: application/json: Titles: - "UID" - "PID" - "PPID" - "C" - "STIME" - "TTY" - "TIME" - "CMD" Processes: - - "root" - "13642" - "882" - "0" - "17:03" - "pts/0" - "00:00:00" - "/bin/bash" - - "root" - "13735" - "13642" - "0" - "17:06" - "pts/0" - "00:00:00" - "sleep 10" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "ps_args" in: "query" description: "The arguments to pass to `ps`. For example, `aux`" type: "string" default: "-ef" tags: ["Container"] /containers/{id}/logs: get: summary: "Get container logs" description: | Get `stdout` and `stderr` logs from a container. Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. operationId: "ContainerLogs" responses: 200: description: | logs returned as a stream in response body. For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not set Content-Type. schema: type: "string" format: "binary" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "follow" in: "query" description: "Keep connection after returning logs." 
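# Illustrative sketch (not part of the schema): following a container's logs
# over the local UNIX socket with curl. The API version prefix below is an
# assumption; substitute the version reported by your daemon.
#
#   curl --unix-socket /var/run/docker.sock \
#     "http://localhost/v1.41/containers/<id>/logs?stdout=true&stderr=true&follow=true"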
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "until" in: "query" description: "Only return logs before this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Container"] /containers/{id}/changes: get: summary: "Get changes on a container’s filesystem" description: | Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: - `0`: Modified - `1`: Added - `2`: Deleted operationId: "ContainerChanges" produces: ["application/json"] responses: 200: description: "The list of changes" schema: type: "array" items: type: "object" x-go-name: "ContainerChangeResponseItem" title: "ContainerChangeResponseItem" description: "change item in response to ContainerChanges operation" required: [Path, Kind] properties: Path: description: "Path to file that has changed" type: "string" x-nullable: false Kind: description: "Kind of change" type: "integer" format: "uint8" enum: [0, 1, 2] x-nullable: false examples: application/json: - Path: "/dev" Kind: 0 - Path: "/dev/kmsg" Kind: 1 - Path: "/test" Kind: 1 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/export: get: summary: "Export a container" description: "Export the contents of a container as a tarball." operationId: "ContainerExport" produces: - "application/octet-stream" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/stats: get: summary: "Get container stats based on resource usage" description: | This endpoint returns a live stream of a container’s resource usage statistics. The `precpu_stats` is the CPU statistic of the *previous* read, and is used to calculate the CPU usage percentage. It is not an exact copy of the `cpu_stats` field. If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. On a cgroup v2 host, the following fields are not set * `blkio_stats`: all fields other than `io_service_bytes_recursive` * `cpu_stats`: `cpu_usage.percpu_usage` * `memory_stats`: `max_usage` and `failcnt` Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
        To calculate the values shown by the `stats` command of the docker cli
        tool the following formulas can be used:
        * used_memory = `memory_stats.usage - memory_stats.stats.cache`
        * available_memory = `memory_stats.limit`
        * Memory usage % = `(used_memory / available_memory) * 100.0`
        * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage`
        * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage`
        * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
        * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0`
      operationId: "ContainerStats"
      produces: ["application/json"]
      responses:
        200:
          description: "no error"
          schema:
            type: "object"
          examples:
            application/json:
              read: "2015-01-08T22:57:31.547920715Z"
              pids_stats:
                current: 3
              networks:
                eth0:
                  rx_bytes: 5338
                  rx_dropped: 0
                  rx_errors: 0
                  rx_packets: 36
                  tx_bytes: 648
                  tx_dropped: 0
                  tx_errors: 0
                  tx_packets: 8
                eth5:
                  rx_bytes: 4641
                  rx_dropped: 0
                  rx_errors: 0
                  rx_packets: 26
                  tx_bytes: 690
                  tx_dropped: 0
                  tx_errors: 0
                  tx_packets: 9
              memory_stats:
                stats:
                  total_pgmajfault: 0
                  cache: 0
                  mapped_file: 0
                  total_inactive_file: 0
                  pgpgout: 414
                  rss: 6537216
                  total_mapped_file: 0
                  writeback: 0
                  unevictable: 0
                  pgpgin: 477
                  total_unevictable: 0
                  pgmajfault: 0
                  total_rss: 6537216
                  total_rss_huge: 6291456
                  total_writeback: 0
                  total_inactive_anon: 0
                  rss_huge: 6291456
                  hierarchical_memory_limit: 67108864
                  total_pgfault: 964
                  total_active_file: 0
                  active_anon: 6537216
                  total_active_anon: 6537216
                  total_pgpgout: 414
                  total_cache: 0
                  inactive_anon: 0
                  active_file: 0
                  pgfault: 964
                  inactive_file: 0
                  total_pgpgin: 477
                max_usage: 6651904
                usage: 6537216
                failcnt: 0
                limit: 67108864
              blkio_stats: {}
              cpu_stats:
                cpu_usage:
                  percpu_usage:
                    - 8646879
                    - 24472255
                    - 36438778
                    - 30657443
                  usage_in_usermode: 50000000
                  total_usage: 100215355
                  usage_in_kernelmode: 30000000
                system_cpu_usage: 739306590000000
                online_cpus: 4
                throttling_data:
                  periods: 0
                  throttled_periods: 0
                  throttled_time: 0
              precpu_stats:
                cpu_usage:
                  percpu_usage:
                    - 8646879
                    - 24350896
                    - 36438778
                    - 30657443
                  usage_in_usermode: 50000000
                  total_usage: 100093996
                  usage_in_kernelmode: 30000000
                system_cpu_usage: 9492140000000
                online_cpus: 4
                throttling_data:
                  periods: 0
                  throttled_periods: 0
                  throttled_time: 0
        404:
          description: "no such container"
          schema:
            $ref: "#/definitions/ErrorResponse"
          examples:
            application/json:
              message: "No such container: c2ada9df5af8"
        500:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
          required: true
          description: "ID or name of the container"
          type: "string"
        - name: "stream"
          in: "query"
          description: |
            Stream the output. If false, the stats will be output once and then
            it will disconnect.
          type: "boolean"
          default: true
        - name: "one-shot"
          in: "query"
          description: |
            Only get a single stat instead of waiting for 2 cycles. Must be used
            with `stream=false`.
          type: "boolean"
          default: false
      tags: ["Container"]
  /containers/{id}/resize:
    post:
      summary: "Resize a container TTY"
      description: "Resize the TTY for a container."
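# Illustrative calculation (not part of the schema) applying the ContainerStats
# formulas documented above to the example response values:
#
#   used_memory      = 6537216 - 0 = 6537216
#   Memory usage %   = (6537216 / 67108864) * 100.0 ≈ 9.74
#   cpu_delta        = 100215355 - 100093996 = 121359
#   system_cpu_delta = 739306590000000 - 9492140000000 = 729814450000000
#   number_cpus      = 4 (cpu_stats.online_cpus)
#   CPU usage %      = (121359 / 729814450000000) * 4 * 100.0 (effectively idle)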
operationId: "ContainerResize" consumes: - "application/octet-stream" produces: - "text/plain" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "cannot resize container" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] /containers/{id}/start: post: summary: "Start a container" operationId: "ContainerStart" responses: 204: description: "no error" 304: description: "container already started" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" tags: ["Container"] /containers/{id}/stop: post: summary: "Stop a container" operationId: "ContainerStop" responses: 204: description: "no error" 304: description: "container already stopped" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/restart: post: summary: "Restart a container" operationId: "ContainerRestart" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/kill: post: summary: "Kill a container" description: | Send a POSIX signal to a container, defaulting to killing to the container. 
operationId: "ContainerKill" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is not running" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "signal" in: "query" description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" type: "string" default: "SIGKILL" tags: ["Container"] /containers/{id}/update: post: summary: "Update a container" description: | Change various configuration options of a container without having to recreate it. operationId: "ContainerUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "The container has been updated." schema: type: "object" title: "ContainerUpdateResponse" description: "OK response to ContainerUpdate operation" properties: Warnings: type: "array" items: type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "update" in: "body" required: true schema: allOf: - $ref: "#/definitions/Resources" - type: "object" properties: RestartPolicy: $ref: "#/definitions/RestartPolicy" example: BlkioWeight: 300 CpuShares: 512 CpuPeriod: 100000 CpuQuota: 50000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpusetCpus: "0,1" CpusetMems: "0" Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" tags: ["Container"] /containers/{id}/rename: post: summary: "Rename a container" operationId: "ContainerRename" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "name already in use" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "name" in: "query" required: true description: "New name for the container" type: "string" tags: ["Container"] /containers/{id}/pause: post: summary: "Pause a container" description: | Use the freezer cgroup to suspend all processes in a container. Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
operationId: "ContainerPause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/unpause: post: summary: "Unpause a container" description: "Resume a container which has been paused." operationId: "ContainerUnpause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/attach: post: summary: "Attach to a container" description: | Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. ### Hijacking This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. This is the response from the daemon for an attach request: ``` HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream [STREAM] ``` After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. For example, the client sends this request to upgrade the connection: ``` POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 Upgrade: tcp Connection: Upgrade ``` The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp [STREAM] ``` ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: ```go header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} ``` `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. The simplest way to implement this protocol is the following: 1. Read 8 bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. 
### Stream format when using a TTY When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" - name: "logs" in: "query" description: | Replay previous logs from the container. This is useful for attaching to a container that has started and you want to output everything since the container started. If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. type: "boolean" default: false - name: "stream" in: "query" description: | Stream attached streams from the time the request was made onwards. type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/attach/ws: get: summary: "Attach to a container via a websocket" operationId: "ContainerAttachWebsocket" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`. type: "string" - name: "logs" in: "query" description: "Return logs" type: "boolean" default: false - name: "stream" in: "query" description: "Return stream" type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/wait: post: summary: "Wait for a container" description: "Block until a container stops, then returns the exit code." 
operationId: "ContainerWait" produces: ["application/json"] responses: 200: description: "The container has exit." schema: type: "object" title: "ContainerWaitResponse" description: "OK response to ContainerWait operation" required: [StatusCode] properties: StatusCode: description: "Exit code of the container" type: "integer" x-nullable: false Error: description: "container waiting error, if any" type: "object" properties: Message: description: "Details of an error" type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "condition" in: "query" description: | Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'. type: "string" default: "not-running" tags: ["Container"] /containers/{id}: delete: summary: "Remove a container" operationId: "ContainerDelete" responses: 204: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: | You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "v" in: "query" description: "Remove anonymous volumes associated with the container." type: "boolean" default: false - name: "force" in: "query" description: "If the container is running, kill it before removing it." type: "boolean" default: false - name: "link" in: "query" description: "Remove the specified link associated with the container." type: "boolean" default: false tags: ["Container"] /containers/{id}/archive: head: summary: "Get information about files in a container" description: | A response header `X-Docker-Container-Path-Stat` is returned, containing a base64 - encoded JSON object with some filesystem header information about the path. operationId: "ContainerArchiveInfo" responses: 200: description: "no error" headers: X-Docker-Container-Path-Stat: type: "string" description: | A base64 - encoded JSON object with some filesystem header information about the path 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." 
type: "string" tags: ["Container"] get: summary: "Get an archive of a filesystem resource in a container" description: "Get a tar archive of a resource in the filesystem of container id." operationId: "ContainerArchive" produces: ["application/x-tar"] responses: 200: description: "no error" 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." type: "string" tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" description: "Upload a tar archive to be extracted to a path in the filesystem of container id." operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: 200: description: "The content was extracted successfully" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such container or path does not exist inside the container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Path to a directory in the container to extract the archive’s contents into. " type: "string" - name: "noOverwriteDirNonDir" in: "query" description: | If `1`, `true`, or `True` then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. type: "string" - name: "copyUIDGID" in: "query" description: | If `1`, `true`, then it will copy UID/GID maps to the dest file or dir type: "string" - name: "inputStream" in: "body" required: true description: | The input stream must be a tar archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, or `xz`. schema: type: "string" format: "binary" tags: ["Container"] /containers/prune: post: summary: "Delete stopped containers" produces: - "application/json" operationId: "ContainerPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ContainerPruneResponse" properties: ContainersDeleted: description: "Container IDs that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /images/json: get: summary: "List Images" description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." operationId: "ImageList" produces: - "application/json" responses: 200: description: "Summary image data for the images matching the query" schema: type: "array" items: $ref: "#/definitions/ImageSummary" examples: application/json: - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ParentId: "" RepoTags: - "ubuntu:12.04" - "ubuntu:precise" RepoDigests: - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" Created: 1474925151 Size: 103579269 VirtualSize: 103579269 SharedSize: 0 Labels: {} Containers: 2 - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" ParentId: "" RepoTags: - "ubuntu:12.10" - "ubuntu:quantal" RepoDigests: - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" Created: 1403128455 Size: 172064416 VirtualSize: 172064416 SharedSize: 0 Labels: {} Containers: 5 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "all" in: "query" description: "Show all images. Only images from a final layer (no children) are shown by default." type: "boolean" default: false - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - `dangling=true` - `label=key` or `label="key=value"` of an image label - `reference`=(`<image-name>[:<tag>]`) - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) type: "string" - name: "shared-size" in: "query" description: "Compute and show shared size as a `SharedSize` field on each image." type: "boolean" default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false tags: ["Image"] /build: post: summary: "Build an image" description: | Build an image from a tar archive with a `Dockerfile` in it. The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. The build is canceled if the client drops the connection by quitting or being killed. 
operationId: "ImageBuild" consumes: - "application/octet-stream" produces: - "application/json" parameters: - name: "inputStream" in: "body" description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." schema: type: "string" format: "binary" - name: "dockerfile" in: "query" description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." type: "string" default: "Dockerfile" - name: "t" in: "query" description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." type: "string" - name: "extrahosts" in: "query" description: "Extra hosts to add to /etc/hosts" type: "string" - name: "remote" in: "query" description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." type: "string" - name: "q" in: "query" description: "Suppress verbose build output." type: "boolean" default: false - name: "nocache" in: "query" description: "Do not use the cache when building the image." type: "boolean" default: false - name: "cachefrom" in: "query" description: "JSON array of images used for build cache resolution." type: "string" - name: "pull" in: "query" description: "Attempt to pull the image even if an older image exists locally." type: "string" - name: "rm" in: "query" description: "Remove intermediate containers after a successful build." type: "boolean" default: true - name: "forcerm" in: "query" description: "Always remove intermediate containers, even upon failure." type: "boolean" default: false - name: "memory" in: "query" description: "Set memory limit for build." type: "integer" - name: "memswap" in: "query" description: "Total memory (memory + swap). Set as `-1` to disable swap." type: "integer" - name: "cpushares" in: "query" description: "CPU shares (relative weight)." type: "integer" - name: "cpusetcpus" in: "query" description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." type: "string" - name: "cpuperiod" in: "query" description: "The length of a CPU period in microseconds." type: "integer" - name: "cpuquota" in: "query" description: "Microseconds of CPU time that the container can get in a CPU period." type: "integer" - name: "buildargs" in: "query" description: > JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) type: "string" - name: "shmsize" in: "query" description: "Size of `/dev/shm` in bytes. The size must be greater than 0. 
If omitted the system uses 64MB." type: "integer" - name: "squash" in: "query" description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" type: "boolean" - name: "labels" in: "query" description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." type: "string" - name: "networkmode" in: "query" description: | Sets the networking mode for the run commands during build. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name or ID to which this container should connect to. type: "string" - name: "Content-type" in: "header" type: "string" enum: - "application/x-tar" default: "application/x-tar" - name: "X-Registry-Config" in: "header" description: | This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: ``` { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } ``` Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" - name: "target" in: "query" description: "Target build stage" type: "string" default: "" - name: "outputs" in: "query" description: "BuildKit output configuration" type: "string" default: "" responses: 200: description: "no error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /build/prune: post: summary: "Delete builder cache" produces: - "application/json" operationId: "BuildPrune" parameters: - name: "keep-storage" in: "query" description: "Amount of disk space in bytes to keep for cache" type: "integer" format: "int64" - name: "all" in: "query" type: "boolean" description: "Remove all types of build cache" - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - `id=<id>` - `parent=<id>` - `type=<string>` - `description=<string>` - `inuse` - `shared` - `private` responses: 200: description: "No error" schema: type: "object" title: "BuildPruneResponse" properties: CachesDeleted: type: "array" items: description: "ID of build cache object" type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /images/create: post: summary: "Create an image" description: "Create an image by either pulling it from a registry or importing it." 
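Pulling from a private registry requires credentials in the `X-Registry-Auth` header described below. A minimal Go sketch of producing that header value (field names are assumed from the `AuthConfig` definition, `req` is the pending request, and imports are elided):

```go
// Marshal the auth configuration and base64url-encode it for X-Registry-Auth.
authConfig := map[string]string{
    "username":      "janedoe",
    "password":      "hunter2",
    "serveraddress": "registry.example.com",
}
buf, err := json.Marshal(authConfig)
if err != nil {
    log.Fatal(err)
}
req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))
```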
operationId: "ImageCreate" consumes: - "text/plain" - "application/octet-stream" produces: - "application/json" responses: 200: description: "no error" 404: description: "repository does not exist or no read access" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "fromImage" in: "query" description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." type: "string" - name: "fromSrc" in: "query" description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." type: "string" - name: "repo" in: "query" description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." type: "string" - name: "tag" in: "query" description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." type: "string" - name: "message" in: "query" description: "Set commit message for imported image." type: "string" - name: "inputImage" in: "body" description: "Image content if the value `-` has been specified in fromSrc query parameter" schema: type: "string" required: false - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "changes" in: "query" description: | Apply `Dockerfile` instructions to the image that is created, for example: `changes=ENV DEBUG=true`. Note that `ENV DEBUG=true` should be URI component encoded. Supported `Dockerfile` instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` type: "array" items: type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" tags: ["Image"] /images/{name}/json: get: summary: "Inspect an image" description: "Return low-level information about an image." 
operationId: "ImageInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Image" examples: application/json: Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" Comment: "" Os: "linux" Architecture: "amd64" Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" ContainerConfig: Tty: false Hostname: "e611e15f9c9d" Domainname: "" AttachStdout: false PublishService: "" AttachStdin: false OpenStdin: false StdinOnce: false NetworkDisabled: false OnBuild: [] Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" User: "" WorkingDir: "" MacAddress: "" AttachStderr: false Labels: com.example.license: "GPL" com.example.version: "1.0" com.example.vendor: "Acme" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: - "/bin/sh" - "-c" - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" DockerVersion: "1.9.0-dev" VirtualSize: 188359297 Size: 0 Author: "" Created: "2015-09-10T08:30:53.26995814Z" GraphDriver: Name: "aufs" Data: {} RepoDigests: - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" RepoTags: - "example:1.0" - "example:latest" - "example:stable" Config: Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" NetworkDisabled: false OnBuild: [] StdinOnce: false PublishService: "" AttachStdin: false OpenStdin: false Domainname: "" AttachStdout: false Tty: false Hostname: "e611e15f9c9d" Cmd: - "/bin/bash" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Labels: com.example.vendor: "Acme" com.example.version: "1.0" com.example.license: "GPL" MacAddress: "" AttachStderr: false WorkingDir: "" User: "" RootFS: Type: "layers" Layers: - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Image"] /images/{name}/history: get: summary: "Get the history of an image" description: "Return parent layers of an image." 
operationId: "ImageHistory" produces: ["application/json"] responses: 200: description: "List of image layers" schema: type: "array" items: type: "object" x-go-name: HistoryResponseItem title: "HistoryResponseItem" description: "individual image layer information in response to ImageHistory operation" required: [Id, Created, CreatedBy, Tags, Size, Comment] properties: Id: type: "string" x-nullable: false Created: type: "integer" format: "int64" x-nullable: false CreatedBy: type: "string" x-nullable: false Tags: type: "array" items: type: "string" Size: type: "integer" format: "int64" x-nullable: false Comment: type: "string" x-nullable: false examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" Created: 1398108230 CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" Tags: - "ubuntu:lucid" - "ubuntu:10.04" Size: 182964289 Comment: "" - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" Created: 1398108222 CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <[email protected]> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" Tags: [] Size: 0 Comment: "" - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" Created: 1371157430 CreatedBy: "" Tags: - "scratch12:latest" - "scratch:latest" Size: 0 Comment: "Imported from -" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/{name}/push: post: summary: "Push an image" description: | Push an image to a registry. If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. The push is cancelled if the HTTP connection is closed. operationId: "ImagePush" consumes: - "application/octet-stream" responses: 200: description: "No error" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID." type: "string" required: true - name: "tag" in: "query" description: "The tag to associate with the image on the registry." type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" required: true tags: ["Image"] /images/{name}/tag: post: summary: "Tag an image" description: "Tag an image so that it becomes part of a repository." operationId: "ImageTag" responses: 201: description: "No error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID to tag." type: "string" required: true - name: "repo" in: "query" description: "The repository to tag in. For example, `someuser/someimage`." type: "string" - name: "tag" in: "query" description: "The name of the new tag." 
type: "string" tags: ["Image"] /images/{name}: delete: summary: "Remove an image" description: | Remove an image, along with any untagged parent images that were referenced by that image. Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. operationId: "ImageDelete" produces: ["application/json"] responses: 200: description: "The image was deleted successfully" schema: type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" examples: application/json: - Untagged: "3e2f21a89f" - Deleted: "3e2f21a89f" - Deleted: "53b4f83ac9" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true - name: "force" in: "query" description: "Remove the image even if it is being used by stopped containers or has other tags" type: "boolean" default: false - name: "noprune" in: "query" description: "Do not delete untagged parent images" type: "boolean" default: false tags: ["Image"] /images/search: get: summary: "Search images" description: "Search for an image on Docker Hub." operationId: "ImageSearch" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: type: "object" title: "ImageSearchResponseItem" properties: description: type: "string" is_official: type: "boolean" is_automated: type: "boolean" name: type: "string" star_count: type: "integer" examples: application/json: - description: "" is_official: false is_automated: false name: "wma55/u1210sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "jdswinbank/sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "vgauthier/sshd" star_count: 0 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "term" in: "query" description: "Term to search" type: "string" required: true - name: "limit" in: "query" description: "Maximum number of results to return" type: "integer" - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `is-automated=(true|false)` - `is-official=(true|false)` - `stars=<number>` Matches images that has at least 'number' stars. type: "string" tags: ["Image"] /images/prune: post: summary: "Delete unused images" produces: - "application/json" operationId: "ImagePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `dangling=<boolean>` When set to `true` (or `1`), prune only unused *and* untagged images. When set to `false` (or `0`), all unused images are pruned. - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ImagePruneResponse" properties: ImagesDeleted: description: "Images that were deleted" type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /auth: post: summary: "Check auth configuration" description: | Validate credentials for a registry and, if available, get an identity token for accessing the registry without password. operationId: "SystemAuth" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "An identity token was generated successfully." schema: type: "object" title: "SystemAuthResponse" required: [Status] properties: Status: description: "The status of the authentication" type: "string" x-nullable: false IdentityToken: description: "An opaque token used to authenticate a user after a successful login" type: "string" x-nullable: false examples: application/json: Status: "Login Succeeded" IdentityToken: "9cbaf023786cd7..." 204: description: "No error" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "authConfig" in: "body" description: "Authentication to check" schema: $ref: "#/definitions/AuthConfig" tags: ["System"] /info: get: summary: "Get system information" operationId: "SystemInfo" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/SystemInfo" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /version: get: summary: "Get version" description: "Returns the version of Docker that is running and various information about the system that Docker is running on." operationId: "SystemVersion" produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/SystemVersion" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /_ping: get: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." operationId: "SystemPing" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "OK" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" headers: Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" tags: ["System"] head: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." 
operationId: "SystemPingHead" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "(empty)" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /commit: post: summary: "Create a new image from a container" operationId: "ImageCommit" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "containerConfig" in: "body" description: "The container configuration" schema: $ref: "#/definitions/ContainerConfig" - name: "container" in: "query" description: "The ID or name of the container to commit" type: "string" - name: "repo" in: "query" description: "Repository name for the created image" type: "string" - name: "tag" in: "query" description: "Tag name for the create image" type: "string" - name: "comment" in: "query" description: "Commit message" type: "string" - name: "author" in: "query" description: "Author of the image (e.g., `John Hannibal Smith <[email protected]>`)" type: "string" - name: "pause" in: "query" description: "Whether to pause the container before committing" type: "boolean" default: true - name: "changes" in: "query" description: "`Dockerfile` instructions to apply while committing" type: "string" tags: ["Image"] /events: get: summary: "Monitor events" description: | Stream real-time events from the server. Various objects within Docker report events when something happens to them. 
Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` The Docker daemon reports these events: `reload` Services report these events: `create`, `update`, and `remove` Nodes report these events: `create`, `update`, and `remove` Secrets report these events: `create`, `update`, and `remove` Configs report these events: `create`, `update`, and `remove` The Builder reports `prune` events operationId: "SystemEvents" produces: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "SystemEventsResponse" properties: Type: description: "The type of object emitting the event" type: "string" Action: description: "The type of event" type: "string" Actor: type: "object" properties: ID: description: "The ID of the object emitting the event" type: "string" Attributes: description: "Various key/value attributes of the object, depending on its type" type: "object" additionalProperties: type: "string" time: description: "Timestamp of event" type: "integer" timeNano: description: "Timestamp of event, with nanosecond accuracy" type: "integer" format: "int64" examples: application/json: Type: "container" Action: "create" Actor: ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" Attributes: com.example.some-label: "some-label-value" image: "alpine" name: "my-container" time: 1461943101 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "since" in: "query" description: "Show events created since this timestamp then stream new events." type: "string" - name: "until" in: "query" description: "Show events created until this timestamp then stop streaming." type: "string" - name: "filters" in: "query" description: | A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: - `config=<string>` config name or ID - `container=<string>` container name or ID - `daemon=<string>` daemon name or ID - `event=<string>` event type - `image=<string>` image name or ID - `label=<string>` image or container label - `network=<string>` network name or ID - `node=<string>` node ID - `plugin`=<string> plugin name or ID - `scope`=<string> local or swarm - `secret=<string>` secret name or ID - `service=<string>` service name or ID - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - `volume=<string>` volume name type: "string" tags: ["System"] /system/df: get: summary: "Get data usage information" operationId: "SystemDataUsage" responses: 200: description: "no error" schema: type: "object" title: "SystemDataUsageResponse" properties: LayersSize: type: "integer" format: "int64" Images: type: "array" items: $ref: "#/definitions/ImageSummary" Containers: type: "array" items: $ref: "#/definitions/ContainerSummary" Volumes: type: "array" items: $ref: "#/definitions/Volume" BuildCache: type: "array" items: $ref: "#/definitions/BuildCache" example: LayersSize: 1092588 Images: - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" ParentId: "" RepoTags: - "busybox:latest" RepoDigests: - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" Created: 1466724217 Size: 1092588 SharedSize: 0 VirtualSize: 1092588 Labels: {} Containers: 1 Containers: - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" Names: - "/top" Image: "busybox" ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" Command: "top" Created: 1472592424 Ports: [] SizeRootFs: 1092588 Labels: {} State: "exited" Status: "Exited (0) 56 minutes ago" HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: IPAMConfig: null Links: null Aliases: null NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" Gateway: "172.18.0.1" IPAddress: "172.18.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Mounts: [] Volumes: - Name: "my-volume" Driver: "local" Mountpoint: "/var/lib/docker/volumes/my-volume/_data" Labels: null Scope: "local" Options: null UsageData: Size: 10920104 RefCount: 2 BuildCache: - ID: "hw53o5aio51xtltp5xjp8v7fx" Parent: "" Type: "regular" Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" InUse: false Shared: true Size: 0 CreatedAt: "2021-06-28T13:31:01.474619385Z" LastUsedAt: "2021-07-07T22:02:32.738075951Z" UsageCount: 26 - ID: "ndlpt0hhvkqcdfkputsk4cq9c" Parent: "hw53o5aio51xtltp5xjp8v7fx" Type: "regular" Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: false Shared: true Size: 51 CreatedAt: "2021-06-28T13:31:03.002625487Z" LastUsedAt: "2021-07-07T22:02:32.773909517Z" UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "type" in: "query" description: | Object types, for which to compute and return data. 
type: "array" collectionFormat: multi items: type: "string" enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: summary: "Export an image" description: | Get a tarball containing all images and metadata for a repository. If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ```json { "hello-world": { "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" } } ``` operationId: "ImageGet" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/get: get: summary: "Export several images" description: | Get a tarball containing all images and metadata for several image repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageGetAll" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "names" in: "query" description: "Image names to filter by" type: "array" items: type: "string" tags: ["Image"] /images/load: post: summary: "Import images" description: | Load a set of images and tags into a repository. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageLoad" consumes: - "application/x-tar" produces: - "application/json" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "imagesTarball" in: "body" description: "Tar archive containing images" schema: type: "string" format: "binary" - name: "quiet" in: "query" description: "Suppress progress details during load." type: "boolean" default: false tags: ["Image"] /containers/{id}/exec: post: summary: "Create an exec instance" description: "Run a command inside a running container." 
operationId: "ContainerExec" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is paused" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execConfig" in: "body" description: "Exec configuration" schema: type: "object" title: "ExecConfig" properties: AttachStdin: type: "boolean" description: "Attach to `stdin` of the exec command." AttachStdout: type: "boolean" description: "Attach to `stdout` of the exec command." AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." DetachKeys: type: "string" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Tty: type: "boolean" description: "Allocate a pseudo-TTY." Env: description: | A list of environment variables in the form `["VAR=value", ...]`. type: "array" items: type: "string" Cmd: type: "array" description: "Command to run, as a string or array of strings." items: type: "string" Privileged: type: "boolean" description: "Runs the exec process with extended privileges." default: false User: type: "string" description: | The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`. WorkingDir: type: "string" description: | The working directory for the exec process inside the container. example: AttachStdin: false AttachStdout: true AttachStderr: true DetachKeys: "ctrl-p,ctrl-q" Tty: false Cmd: - "date" Env: - "FOO=bar" - "BAZ=quux" required: true - name: "id" in: "path" description: "ID or name of container" type: "string" required: true tags: ["Exec"] /exec/{id}/start: post: summary: "Start an exec instance" description: | Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command. operationId: "ExecStart" consumes: - "application/json" produces: - "application/vnd.docker.raw-stream" responses: 200: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Container is stopped or paused" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execStartConfig" in: "body" schema: type: "object" title: "ExecStartConfig" properties: Detach: type: "boolean" description: "Detach from the command." Tty: type: "boolean" description: "Allocate a pseudo-TTY." example: Detach: false Tty: false - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /exec/{id}/resize: post: summary: "Resize an exec instance" description: | Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance. 
operationId: "ExecResize" responses: 201: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] /exec/{id}/json: get: summary: "Inspect an exec instance" description: "Return low-level information about an exec instance." operationId: "ExecInspect" produces: - "application/json" responses: 200: description: "No error" schema: type: "object" title: "ExecInspectResponse" properties: CanRemove: type: "boolean" DetachKeys: type: "string" ID: type: "string" Running: type: "boolean" ExitCode: type: "integer" ProcessConfig: $ref: "#/definitions/ProcessConfig" OpenStdin: type: "boolean" OpenStderr: type: "boolean" OpenStdout: type: "boolean" ContainerID: type: "string" Pid: type: "integer" description: "The system process ID for the exec process." examples: application/json: CanRemove: false ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" DetachKeys: "" ExitCode: 2 ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" OpenStderr: true OpenStdin: true OpenStdout: true ProcessConfig: arguments: - "-c" - "exit 2" entrypoint: "sh" privileged: false tty: true user: "1000" Running: false Pid: 42000 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /volumes: get: summary: "List volumes" operationId: "VolumeList" produces: ["application/json"] responses: 200: description: "Summary volume data that matches the query" schema: type: "object" title: "VolumeListResponse" description: "Volume list response" required: [Volumes, Warnings] properties: Volumes: type: "array" x-nullable: false description: "List of volumes" items: $ref: "#/definitions/Volume" Warnings: type: "array" x-nullable: false description: | Warnings that occurred when fetching the list of volumes. items: type: "string" examples: application/json: Volumes: - CreatedAt: "2017-07-19T12:00:26Z" Name: "tardis" Driver: "local" Mountpoint: "/var/lib/docker/volumes/tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" Options: device: "tmpfs" o: "size=100m,uid=1000" type: "tmpfs" Warnings: [] 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are not in use by a container. When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. - `driver=<volume-driver-name>` Matches volumes based on their driver. - `label=<key>` or `label=<key>:<value>` Matches volumes based on the presence of a `label` alone or a `label` and a value. - `name=<volume-name>` Matches all or part of a volume name. 
type: "string" format: "json" tags: ["Volume"] /volumes/create: post: summary: "Create a volume" operationId: "VolumeCreate" consumes: ["application/json"] produces: ["application/json"] responses: 201: description: "The volume was created successfully" schema: $ref: "#/definitions/Volume" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "volumeConfig" in: "body" required: true description: "Volume configuration" schema: type: "object" description: "Volume configuration" title: "VolumeConfig" properties: Name: description: | The new volume's name. If not specified, Docker generates a name. type: "string" x-nullable: false Driver: description: "Name of the volume driver to use." type: "string" default: "local" x-nullable: false DriverOpts: description: | A mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: Name: "tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Driver: "custom" tags: ["Volume"] /volumes/{name}: get: summary: "Inspect a volume" operationId: "VolumeInspect" produces: ["application/json"] responses: 200: description: "No error" schema: $ref: "#/definitions/Volume" 404: description: "No such volume" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" tags: ["Volume"] delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." operationId: "VolumeDelete" responses: 204: description: "The volume was removed" 404: description: "No such volume or volume driver" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Volume is in use and cannot be removed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" - name: "force" in: "query" description: "Force the removal of the volume" type: "boolean" default: false tags: ["Volume"] /volumes/prune: post: summary: "Delete unused volumes" produces: - "application/json" operationId: "VolumePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: description: "No error" schema: type: "object" title: "VolumePruneResponse" properties: VolumesDeleted: description: "Volumes that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Volume"] /networks: get: summary: "List networks" description: | Returns a list of networks. For details on the format, see the [network inspect endpoint](#operation/NetworkInspect). Note that it uses a different, smaller representation of a network than inspecting a single network. 
For example, the list of containers attached to the network is not propagated in API versions 1.28 and up. operationId: "NetworkList" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Network" examples: application/json: - Name: "bridge" Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" Created: "2016-10-19T06:21:00.416543526Z" Scope: "local" Driver: "bridge" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: - Subnet: "172.17.0.0/16" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" - Name: "none" Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "null" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} - Name: "host" Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "host" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all networks that are not in use by a container. When set to `false` (or `0`), only networks that are in use by one or more containers are returned. - `driver=<driver-name>` Matches a network's driver. - `id=<network-id>` Matches all or part of a network ID. - `label=<key>` or `label=<key>=<value>` of a network label. - `name=<network-name>` Matches all or part of a network name. - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
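# Illustrative sketch (not part of the OpenAPI definition): network filters use the
# same JSON-encoded map[string][]string form, e.g. only user-defined bridge networks
# (URL-encoding of the JSON value is assumed):
#
#   GET /v1.42/networks?filters={"driver":["bridge"],"type":["custom"]}
#   --> 200 OK, an array of Network objects as in the example above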
type: "string" tags: ["Network"] /networks/{id}: get: summary: "Inspect a network" operationId: "NetworkInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Network" 404: description: "Network not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "verbose" in: "query" description: "Detailed inspect output for troubleshooting" type: "boolean" default: false - name: "scope" in: "query" description: "Filter the network by scope (swarm, global, or local)" type: "string" tags: ["Network"] delete: summary: "Remove a network" operationId: "NetworkDelete" responses: 204: description: "No error" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such network" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" tags: ["Network"] /networks/create: post: summary: "Create a network" operationId: "NetworkCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "No error" schema: type: "object" title: "NetworkCreateResponse" properties: Id: description: "The ID of the created network." type: "string" Warning: type: "string" example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "plugin not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "networkConfig" in: "body" description: "Network configuration" required: true schema: type: "object" title: "NetworkCreateRequest" required: ["Name"] properties: Name: description: "The network's name." type: "string" CheckDuplicate: description: | Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions. type: "boolean" Driver: description: "Name of the network driver plugin to use." type: "string" default: "bridge" Internal: description: "Restrict external access to the network." type: "boolean" Attachable: description: | Globally scoped network is manually attachable by regular containers from workers in swarm mode. type: "boolean" Ingress: description: | Ingress network is the network which provides the routing-mesh in swarm mode. type: "boolean" IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" Options: description: "Network specific options to be used by the drivers." type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: Name: "isolated_nw" CheckDuplicate: false Driver: "bridge" EnableIPv6: true IPAM: Driver: "default" Config: - Subnet: "172.20.0.0/16" IPRange: "172.20.10.0/24" Gateway: "172.20.10.11" - Subnet: "2001:db8:abcd::/64" Gateway: "2001:db8:abcd::1011" Options: foo: "bar" Internal: true Attachable: false Ingress: false Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" tags: ["Network"] /networks/{id}/connect: post: summary: "Connect a container to a network" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkConnectRequest" properties: Container: type: "string" description: "The ID or name of the container to connect to the network." EndpointConfig: $ref: "#/definitions/EndpointSettings" example: Container: "3613f73ba0e4" EndpointConfig: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" tags: ["Network"] /networks/{id}/disconnect: post: summary: "Disconnect a container from a network" operationId: "NetworkDisconnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkDisconnectRequest" properties: Container: type: "string" description: | The ID or name of the container to disconnect from the network. Force: type: "boolean" description: | Force the container to disconnect from the network. tags: ["Network"] /networks/prune: post: summary: "Delete unused networks" produces: - "application/json" operationId: "NetworkPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "NetworkPruneResponse" properties: NetworksDeleted: description: "Networks that were deleted" type: "array" items: type: "string" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Network"] /plugins: get: summary: "List plugins" operationId: "PluginList" description: "Returns information about installed plugins." produces: ["application/json"] responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Plugin" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - `capability=<capability name>` - `enable=<true>|<false>` tags: ["Plugin"] /plugins/privileges: get: summary: "Get plugin privileges" operationId: "GetPluginPrivileges" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: - "Plugin" /plugins/pull: post: summary: "Install a plugin" operationId: "PluginPull" description: | Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). produces: - "application/json" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | Remote reference for plugin to install. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "name" in: "query" description: | Local name for the pulled plugin. The `:latest` tag is optional, and is used as the default if omitted. required: false type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/{name}/json: get: summary: "Inspect a plugin" operationId: "PluginInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" tags: ["Plugin"] /plugins/{name}: delete: summary: "Remove a plugin" operationId: "PluginDelete" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "force" in: "query" description: | Disable the plugin before removing. This may result in issues if the plugin is in use by a container. type: "boolean" default: false tags: ["Plugin"] /plugins/{name}/enable: post: summary: "Enable a plugin" operationId: "PluginEnable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "timeout" in: "query" description: "Set the HTTP client timeout (in seconds)" type: "integer" default: 0 tags: ["Plugin"] /plugins/{name}/disable: post: summary: "Disable a plugin" operationId: "PluginDisable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: ["Plugin"] /plugins/{name}/upgrade: post: summary: "Upgrade a plugin" operationId: "PluginUpgrade" responses: 204: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "remote" in: "query" description: | Remote reference to upgrade to. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilegeItem" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/create: post: summary: "Create a plugin" operationId: "PluginCreate" consumes: - "application/x-tar" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" - name: "tarContext" in: "body" description: "Path to tar containing plugin rootfs and manifest" schema: type: "string" format: "binary" tags: ["Plugin"] /plugins/{name}/push: post: summary: "Push a plugin" operationId: "PluginPush" description: | Push a plugin to the registry. parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" responses: 200: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /plugins/{name}/set: post: summary: "Configure a plugin" operationId: "PluginSet" consumes: - "application/json" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "body" in: "body" schema: type: "array" items: type: "string" example: ["DEBUG=1"] responses: 204: description: "No error" 404: description: "Plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /nodes: get: summary: "List nodes" operationId: "NodeList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Node" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). Available filters: - `id=<node id>` - `label=<engine label>` - `membership=`(`accepted`|`pending`)` - `name=<node name>` - `node.label=<node label>` - `role=`(`manager`|`worker`)` type: "string" tags: ["Node"] /nodes/{id}: get: summary: "Inspect a node" operationId: "NodeInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Node" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true tags: ["Node"] delete: summary: "Delete a node" operationId: "NodeDelete" responses: 200: description: "no error" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true - name: "force" in: "query" description: "Force remove a node from the swarm" default: false type: "boolean" tags: ["Node"] /nodes/{id}/update: post: summary: "Update a node" operationId: "NodeUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID of 
the node" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/NodeSpec" - name: "version" in: "query" description: | The version number of the node object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Node"] /swarm: get: summary: "Inspect swarm" operationId: "SwarmInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Swarm" 404: description: "no such swarm" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/init: post: summary: "Initialize a new swarm" operationId: "SwarmInit" produces: - "application/json" - "text/plain" responses: 200: description: "no error" schema: description: "The node ID" type: "string" example: "7v2t30z9blmxuhnyo6s4cpenp" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmInitRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used. type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. if no port is set or is set to 0, default port 4789 will be used. type: "integer" format: "uint32" DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" example: ["10.10.0.0/16", "20.20.0.0/16"] ForceNewCluster: description: "Force creation of a new swarm." type: "boolean" SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. 
type: "integer" format: "uint32" Spec: $ref: "#/definitions/SwarmSpec" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" DataPathPort: 4789 DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] SubnetSize: 24 ForceNewCluster: false Spec: Orchestration: {} Raft: {} Dispatcher: {} CAConfig: {} EncryptionConfig: AutoLockManagers: false tags: ["Swarm"] /swarm/join: post: summary: "Join an existing swarm" operationId: "SwarmJoin" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmJoinRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" RemoteAddrs: description: | Addresses of manager nodes already participating in the swarm. type: "array" items: type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" RemoteAddrs: - "node1:2377" JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" tags: ["Swarm"] /swarm/leave: post: summary: "Leave a swarm" operationId: "SwarmLeave" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "force" description: | Force leave swarm, even if this is the last manager or that it will break the cluster. in: "query" type: "boolean" default: false tags: ["Swarm"] /swarm/update: post: summary: "Update a swarm" operationId: "SwarmUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: $ref: "#/definitions/SwarmSpec" - name: "version" in: "query" description: | The version number of the swarm object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true - name: "rotateWorkerToken" in: "query" description: "Rotate the worker join token." type: "boolean" default: false - name: "rotateManagerToken" in: "query" description: "Rotate the manager join token." type: "boolean" default: false - name: "rotateManagerUnlockKey" in: "query" description: "Rotate the manager unlock key." type: "boolean" default: false tags: ["Swarm"] /swarm/unlockkey: get: summary: "Get the unlock key" operationId: "SwarmUnlockkey" consumes: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "UnlockKeyResponse" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/unlock: post: summary: "Unlock a locked manager" operationId: "SwarmUnlock" consumes: - "application/json" produces: - "application/json" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /services: get: summary: "List services" operationId: "ServiceList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Service" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - `id=<service id>` - `label=<service label>` - `mode=["replicated"|"global"]` - `name=<service name>` - name: "status" in: "query" type: "boolean" description: | Include service status, with count of running and desired tasks. tags: ["Service"] /services/create: post: summary: "Create a service" operationId: "ServiceCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: type: "object" title: "ServiceCreateResponse" properties: ID: description: "The ID of the created service." 
type: "string" Warning: description: "Optional warning message" type: "string" example: ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "network is not eligible for services" schema: $ref: "#/definitions/ErrorResponse" 409: description: "name conflicts with an existing service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "web" TaskTemplate: ContainerSpec: Image: "nginx:alpine" Mounts: - ReadOnly: true Source: "web-data" Target: "/usr/share/nginx/html" Type: "volume" VolumeOptions: DriverConfig: {} Labels: com.example.something: "something-value" Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] User: "33" DNSConfig: Nameservers: ["8.8.8.8"] Search: ["example.org"] Options: ["timeout:3"] Secrets: - File: Name: "www.example.org.key" UID: "33" GID: "33" Mode: 384 SecretID: "fpjqlhnwb19zds35k8wn80lq9" SecretName: "example_org_domain_key" LogDriver: Name: "json-file" Options: max-file: "3" max-size: "10M" Placement: {} Resources: Limits: MemoryBytes: 104857600 Reservations: {} RestartPolicy: Condition: "on-failure" Delay: 10000000000 MaxAttempts: 10 Mode: Replicated: Replicas: 4 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Ports: - Protocol: "tcp" PublishedPort: 8080 TargetPort: 80 Labels: foo: "bar" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}: get: summary: "Inspect a service" operationId: "ServiceInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Service" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "insertDefaults" in: "query" description: "Fill empty fields with default values." type: "boolean" default: false tags: ["Service"] delete: summary: "Delete a service" operationId: "ServiceDelete" responses: 200: description: "no error" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." 
required: true type: "string" tags: ["Service"] /services/{id}/update: post: summary: "Update a service" operationId: "ServiceUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/ServiceUpdateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "top" TaskTemplate: ContainerSpec: Image: "busybox" Args: - "top" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" - name: "version" in: "query" description: | The version number of the service object being updated. This is required to avoid conflicting writes. This version number should be the value as currently set on the service *before* the update. You can find the current version by calling `GET /services/{id}` required: true type: "integer" - name: "registryAuthFrom" in: "query" description: | If the `X-Registry-Auth` header is not specified, this parameter indicates where to find registry authorization credentials. type: "string" enum: ["spec", "previous-spec"] default: "spec" - name: "rollback" in: "query" description: | Set to this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case. type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}/logs: get: summary: "Get service logs" description: | Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "ServiceLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such service: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the service" type: "string" - name: "details" in: "query" description: "Show service context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Service"] /tasks: get: summary: "List tasks" operationId: "TaskList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Task" example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" - ID: "1yljwbmlr8er2waf8orvqpwms" Version: Index: 30 CreatedAt: "2016-06-07T21:07:30.019104782Z" UpdatedAt: "2016-06-07T21:07:30.231958098Z" Name: "hopeful_cori" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:30.202183143Z" State: "shutdown" Message: "shutdown" ContainerStatus: ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" DesiredState: "shutdown" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.5/16" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: - `desired-state=(running | shutdown | accepted)` - `id=<task id>` - `label=key` or `label="key=value"` - `name=<task name>` - `node=<node id or name>` - `service=<service name>` tags: ["Task"] /tasks/{id}: get: summary: "Inspect a task" operationId: "TaskInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Task" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID of the task" required: true type: "string" tags: ["Task"] /tasks/{id}/logs: get: summary: "Get task logs" description: | Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such task: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID of the task" type: "string" - name: "details" in: "query" description: "Show task context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Task"] /secrets: get: summary: "List secrets" operationId: "SecretList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Secret" example: - ID: "blt1owaxmitz71s9v5zh81zun" Version: Index: 85 CreatedAt: "2017-07-20T13:55:28.678958722Z" UpdatedAt: "2017-07-20T13:55:28.678958722Z" Spec: Name: "mysql-passwd" Labels: some.label: "some.value" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. 
Available filters: - `id=<secret id>` - `label=<key> or label=<key>=value` - `name=<secret name>` - `names=<secret name>` tags: ["Secret"] /secrets/create: post: summary: "Create a secret" operationId: "SecretCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/SecretSpec" - type: "object" example: Name: "app-key.crt" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" tags: ["Secret"] /secrets/{id}: get: summary: "Inspect a secret" operationId: "SecretInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Secret" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] delete: summary: "Delete a secret" operationId: "SecretDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] /secrets/{id}/update: post: summary: "Update a Secret" operationId: "SecretUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such secret" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the secret" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/SecretSpec" description: | The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values. - name: "version" in: "query" description: | The version number of the secret object being updated. This is required to avoid conflicting writes. 
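# Illustrative sketch (not part of the OpenAPI definition): creating a secret and
# later updating its labels. Only the Labels field may change on update, and the
# current version number must be supplied. IDs and data reuse the example values above.
#
#   POST /v1.42/secrets/create HTTP/1.1
#   Content-Type: application/json
#   {"Name": "app-key.crt", "Labels": {"foo": "bar"}, "Data": "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="}
#   --> 201 Created  {"Id": "..."}
#
#   GET  /v1.42/secrets/ktnbjxoalbkvbvedmg1urrz8h           (read Version.Index from the response)
#   POST /v1.42/secrets/ktnbjxoalbkvbvedmg1urrz8h/update?version=11
#   <body: the SecretSpec from the inspect response with updated Labels>
#   --> 200 OK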
type: "integer" format: "int64" required: true tags: ["Secret"] /configs: get: summary: "List configs" operationId: "ConfigList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Config" example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "server.conf" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: - `id=<config id>` - `label=<key> or label=<key>=value` - `name=<config name>` - `names=<config name>` tags: ["Config"] /configs/create: post: summary: "Create a config" operationId: "ConfigCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/ConfigSpec" - type: "object" example: Name: "server.conf" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" tags: ["Config"] /configs/{id}: get: summary: "Inspect a config" operationId: "ConfigInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Config" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] delete: summary: "Delete a config" operationId: "ConfigDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] /configs/{id}/update: post: summary: "Update a Config" operationId: "ConfigUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such config" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the config" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/ConfigSpec" description: | The spec of the config to update. 
Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values. - name: "version" in: "query" description: | The version number of the config object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Config"] /distribution/{name}/json: get: summary: "Get image information from the registry" description: | Return image digest and platform information by contacting the registry. operationId: "DistributionInspect" produces: - "application/json" responses: 200: description: "descriptor and platform information" schema: type: "object" x-go-name: DistributionInspect title: "DistributionInspectResponse" required: [Descriptor, Platforms] properties: Descriptor: type: "object" description: | A descriptor struct containing digest, media type, and size. properties: mediaType: type: "string" size: type: "integer" format: "int64" digest: type: "string" urls: type: "array" items: type: "string" Platforms: type: "array" description: | An array containing all platforms supported by the image. items: type: "object" properties: architecture: type: "string" os: type: "string" os.version: type: "string" os.features: type: "array" items: type: "string" variant: type: "string" Features: type: "array" items: type: "string" examples: application/json: Descriptor: MediaType: "application/vnd.docker.distribution.manifest.v2+json" Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" Size: 3987495 URLs: - "" Platforms: - architecture: "amd64" os: "linux" os.version: "" os.features: - "" variant: "" Features: - "" 401: description: "Failed authentication or no image found" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Distribution"] /session: post: summary: "Initialize interactive session" description: | Start a new interactive session with a server. Session allows the server to call back to the client for advanced capabilities. ### Hijacking This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gRPC services on that connection. For example, the client sends this request to upgrade the connection: ``` POST /session HTTP/1.1 Upgrade: h2c Connection: Upgrade ``` The Docker daemon responds with a `101 UPGRADED` response, followed by the raw stream: ``` HTTP/1.1 101 UPGRADED Connection: Upgrade Upgrade: h2c ``` operationId: "Session" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hijacking successful" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Session"]
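# Illustrative sketch (not part of the OpenAPI definition): inspecting an image on a
# registry without pulling it. The image name is a placeholder; a failed
# authentication or unknown image returns 401 as documented above.
#
#   GET /v1.42/distribution/nginx:latest/json
#   --> 200 OK, a DistributionInspectResponse with Descriptor and Platforms as in
#       the example above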
# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. # # This is used for generating API documentation and the types used by the # client/server. See api/README.md for more information. # # Some style notes: # - This file is used by ReDoc, which allows GitHub Flavored Markdown in # descriptions. # - There is no maximum line length, for ease of editing and pretty diffs. # - operationIds are in the format "NounVerb", with a singular noun. swagger: "2.0" schemes: - "http" - "https" produces: - "application/json" - "text/plain" consumes: - "application/json" - "text/plain" basePath: "/v1.42" info: title: "Docker Engine API" version: "1.42" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { "message": "page not found" } ``` # Versioning The API is usually changed in each release, so API calls are versioned to ensure that clients don't break. To lock to a specific version of the API, you prefix the URL with its version, for example, call `/v1.30/info` to use the v1.30 version of the `/info` endpoint. If the API version specified in the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. If you omit the version-prefix, the current version of the API (v1.42) is used. For example, calling `/info` is the same as calling `/v1.42/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer daemons. # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) (JSON) string with the following structure: ``` { "username": "string", "password": "string", "email": "string", "serveraddress": "string" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { "identitytoken": "9cbaf023786cd7..." } ``` # The tags on paths define the menu sections in the ReDoc documentation, so # the usage of tags must make sense for that: # - They should be singular, not plural. # - There should not be too many tags, or the menu becomes unwieldy. 
For # example, it is preferable to add a path to the "System" tag instead of # creating a tag with a single path in it. # - The order of tags in this list defines the order in the menu. tags: # Primary objects - name: "Container" x-displayName: "Containers" description: | Create and manage containers. - name: "Image" x-displayName: "Images" - name: "Network" x-displayName: "Networks" description: | Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/network/) for more information. - name: "Volume" x-displayName: "Volumes" description: | Create and manage persistent storage that can be attached to containers. - name: "Exec" x-displayName: "Exec" description: | Run new commands inside running containers. Refer to the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. # Swarm things - name: "Swarm" x-displayName: "Swarm" description: | Engines can be clustered together in a swarm. Refer to the [swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - name: "Node" x-displayName: "Nodes" description: | Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Service" x-displayName: "Services" description: | Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Task" x-displayName: "Tasks" description: | A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - name: "Secret" x-displayName: "Secrets" description: | Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - name: "Config" x-displayName: "Configs" description: | Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. 
# System things - name: "Plugin" x-displayName: "Plugins" - name: "System" x-displayName: "System" definitions: Port: type: "object" description: "An open port on a container" required: [PrivatePort, Type] properties: IP: type: "string" format: "ip-address" description: "Host IP address that the container's port is mapped to" PrivatePort: type: "integer" format: "uint16" x-nullable: false description: "Port on the container" PublicPort: type: "integer" format: "uint16" description: "Port exposed on the host" Type: type: "string" x-nullable: false enum: ["tcp", "udp", "sctp"] example: PrivatePort: 8080 PublicPort: 80 Type: "tcp" MountPoint: type: "object" description: "A mount point inside a container" properties: Type: type: "string" Name: type: "string" Source: type: "string" Destination: type: "string" Driver: type: "string" Mode: type: "string" RW: type: "boolean" Propagation: type: "string" DeviceMapping: type: "object" description: "A device mapping between the host and container" properties: PathOnHost: type: "string" PathInContainer: type: "string" CgroupPermissions: type: "string" example: PathOnHost: "/dev/deviceName" PathInContainer: "/dev/deviceName" CgroupPermissions: "mrw" DeviceRequest: type: "object" description: "A request for devices to be sent to device drivers" properties: Driver: type: "string" example: "nvidia" Count: type: "integer" example: -1 DeviceIDs: type: "array" items: type: "string" example: - "0" - "1" - "GPU-fef8089b-4820-abfc-e83e-94318197576e" Capabilities: description: | A list of capabilities; an OR list of AND lists of capabilities. type: "array" items: type: "array" items: type: "string" example: # gpu AND nvidia AND compute - ["gpu", "nvidia", "compute"] Options: description: | Driver-specific options, specified as a key/value pairs. These options are passed directly to the driver. type: "object" additionalProperties: type: "string" ThrottleDevice: type: "object" properties: Path: description: "Device path" type: "string" Rate: description: "Rate" type: "integer" format: "int64" minimum: 0 Mount: type: "object" properties: Target: description: "Container path." type: "string" Source: description: "Mount source (e.g. a volume name, a host path)." type: "string" Type: description: | The mount type. Available types: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. type: "string" enum: - "bind" - "volume" - "tmpfs" - "npipe" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" Consistency: description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." type: "string" BindOptions: description: "Optional configuration for the `bind` type." type: "object" properties: Propagation: description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." type: "string" enum: - "private" - "rprivate" - "shared" - "rshared" - "slave" - "rslave" NonRecursive: description: "Disable recursive bind mount." type: "boolean" default: false VolumeOptions: description: "Optional configuration for the `volume` type." 
type: "object" properties: NoCopy: description: "Populate volume with data from the target." type: "boolean" default: false Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" DriverConfig: description: "Map of driver specific options" type: "object" properties: Name: description: "Name of the driver to use to create the volume." type: "string" Options: description: "key/value map of driver specific options." type: "object" additionalProperties: type: "string" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" properties: SizeBytes: description: "The size for the tmpfs mount in bytes." type: "integer" format: "int64" Mode: description: "The permission mode for the tmpfs mount in an integer." type: "integer" RestartPolicy: description: | The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. type: "object" properties: Name: type: "string" description: | - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" - "no" - "always" - "unless-stopped" - "on-failure" MaximumRetryCount: type: "integer" description: | If `on-failure` is used, the number of times to retry before giving up. Resources: description: "A container's resources (cgroups config, ulimits, etc)" type: "object" properties: # Applicable to all platforms CpuShares: description: | An integer value representing this container's relative CPU weight versus other containers. type: "integer" Memory: description: "Memory limit in bytes." type: "integer" format: "int64" default: 0 # Applicable to UNIX platforms CgroupParent: description: | Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. type: "string" BlkioWeight: description: "Block IO weight (relative weight)." type: "integer" minimum: 0 maximum: 1000 BlkioWeightDevice: description: | Block IO weight (relative device weight) in the form: ``` [{"Path": "device_path", "Weight": weight}] ``` type: "array" items: type: "object" properties: Path: type: "string" Weight: type: "integer" minimum: 0 BlkioDeviceReadBps: description: | Limit read rate (bytes per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteBps: description: | Limit write rate (bytes per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceReadIOps: description: | Limit read rate (IO per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteIOps: description: | Limit write rate (IO per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" CpuPeriod: description: "The length of a CPU period in microseconds." 
type: "integer" format: "int64" CpuQuota: description: | Microseconds of CPU time that the container can get in a CPU period. type: "integer" format: "int64" CpuRealtimePeriod: description: | The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpuRealtimeRuntime: description: | The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpusetCpus: description: | CPUs in which to allow execution (e.g., `0-3`, `0,1`). type: "string" example: "0-3" CpusetMems: description: | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. type: "string" Devices: description: "A list of devices to add to the container." type: "array" items: $ref: "#/definitions/DeviceMapping" DeviceCgroupRules: description: "a list of cgroup rules to apply to the container" type: "array" items: type: "string" example: "c 13:* rwm" DeviceRequests: description: | A list of requests for devices to be sent to device drivers. type: "array" items: $ref: "#/definitions/DeviceRequest" KernelMemory: description: | Kernel memory limit in bytes. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "integer" format: "int64" example: 209715200 KernelMemoryTCP: description: "Hard limit for kernel TCP buffer memory (in bytes)." type: "integer" format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" format: "int64" MemorySwap: description: | Total memory limit (memory + swap). Set as `-1` to enable unlimited swap. type: "integer" format: "int64" MemorySwappiness: description: | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. type: "integer" format: "int64" minimum: 0 maximum: 100 NanoCpus: description: "CPU quota in units of 10<sup>-9</sup> CPUs." type: "integer" format: "int64" OomKillDisable: description: "Disable OOM Killer for the container." type: "boolean" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true PidsLimit: description: | Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. type: "integer" format: "int64" x-nullable: true Ulimits: description: | A list of resource limits to set in the container. For example: ``` {"Name": "nofile", "Soft": 1024, "Hard": 2048} ``` type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" # Applicable to Windows CpuCount: description: | The number of usable CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" CpuPercent: description: | The usable percentage of the available CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. 
type: "integer" format: "int64" IOMaximumIOps: description: "Maximum IOps for the container system drive (Windows only)" type: "integer" format: "int64" IOMaximumBandwidth: description: | Maximum IO in bytes per second for the container system drive (Windows only). type: "integer" format: "int64" Limit: description: | An object describing a limit on resources which can be requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 Pids: description: | Limits the maximum number of PIDs in the container. Set `0` for unlimited. type: "integer" format: "int64" default: 0 example: 100 ResourceObject: description: | An object describing the resources which can be advertised by a node and requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 GenericResources: $ref: "#/definitions/GenericResources" GenericResources: description: | User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`). type: "array" items: type: "object" properties: NamedResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "string" DiscreteResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "integer" format: "int64" example: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" HealthConfig: description: "A test to perform to check that the container is healthy." type: "object" properties: Test: description: | The test to perform. Possible values are: - `[]` inherit healthcheck from image or parent image - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell type: "array" items: type: "string" Interval: description: | The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Retries: description: | The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. type: "integer" StartPeriod: description: | Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Health: description: | Health stores information about the container's healthcheck results. 
type: "object" properties: Status: description: | Status is one of `none`, `starting`, `healthy` or `unhealthy` - "none" Indicates there is no healthcheck - "starting" Starting indicates that the container is not yet ready - "healthy" Healthy indicates that the container is running correctly - "unhealthy" Unhealthy indicates that the container has a problem type: "string" enum: - "none" - "starting" - "healthy" - "unhealthy" example: "healthy" FailingStreak: description: "FailingStreak is the number of consecutive failures" type: "integer" example: 0 Log: type: "array" description: | Log contains the last few results (oldest first) items: x-nullable: true $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" properties: Start: description: | Date and time at which this check started in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "date-time" example: "2020-01-04T10:44:24.496525531Z" End: description: | Date and time at which this check ended in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2020-01-04T10:45:21.364524523Z" ExitCode: description: | ExitCode meanings: - `0` healthy - `1` unhealthy - `2` reserved (considered unhealthy) - other values: error running probe type: "integer" example: 0 Output: description: "Output from last check" type: "string" HostConfig: description: "Container configuration that depends on the host we are running on" allOf: - $ref: "#/definitions/Resources" - type: "object" properties: # Applicable to all platforms Binds: type: "array" description: | A list of volume bindings for this container. Each volume binding is a string in one of these forms: - `host-src:container-dest[:options]` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest[:options]` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. `options` is an optional, comma-delimited list of: - `nocopy` disables automatic copying of data from the container path to the volume. The `nocopy` flag only applies to named volumes. - `[ro|rw]` mounts a volume read-only or read-write, respectively. If omitted or set to `rw`, volumes are mounted read-write. - `[z|Z]` applies SELinux labels to allow or deny multiple containers to read and write to the same volume. - `z`: a _shared_ content label is applied to the content. This label indicates that multiple containers can share the volume content, for both reading and writing. - `Z`: a _private unshared_ label is applied to the content. This label indicates that only the current container can use a private volume. Labeling systems such as SELinux require proper labels to be placed on volume content that is mounted into a container. Without a label, the security system can prevent a container's processes from using the content. By default, the labels set by the host operating system are not modified. - `[[r]shared|[r]slave|[r]private]` specifies mount [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). This only applies to bind-mounted volumes, not internal volumes or named volumes. 
Mount propagation requires the source mount point (the location where the source directory is mounted in the host operating system) to have the correct propagation properties. For shared volumes, the source mount point must be set to `shared`. For slave volumes, the mount must be set to either `shared` or `slave`. items: type: "string" ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: type: "string" enum: - "json-file" - "syslog" - "journald" - "gelf" - "fluentd" - "awslogs" - "splunk" - "etwlogs" - "none" Config: type: "object" additionalProperties: type: "string" NetworkMode: type: "string" description: | Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name to which this container should connect to. PortBindings: $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: type: "boolean" description: | Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set. VolumeDriver: type: "string" description: "Driver that this container uses to mount volumes." VolumesFrom: type: "array" description: | A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`. items: type: "string" Mounts: description: | Specification for mounts to be added to the container. type: "array" items: $ref: "#/definitions/Mount" # Applicable to UNIX platforms CapAdd: type: "array" description: | A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'. items: type: "string" CapDrop: type: "array" description: | A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'. items: type: "string" CgroupnsMode: type: "string" enum: - "private" - "host" description: | cgroup namespace mode for the container. Possible values are: - `"private"`: the container runs in its own private cgroup namespace - `"host"`: use the host system's cgroup namespace If not specified, the daemon default is used, which can either be `"private"` or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." items: type: "string" DnsOptions: type: "array" description: "A list of DNS options." items: type: "string" DnsSearch: type: "array" description: "A list of DNS search domains." items: type: "string" ExtraHosts: type: "array" description: | A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. items: type: "string" GroupAdd: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" IpcMode: type: "string" description: | IPC sharing mode for the container. Possible values are: - `"none"`: own private IPC namespace, with /dev/shm not mounted - `"private"`: own private IPC namespace - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - `"container:<name|id>"`: join another (shareable) container's IPC namespace - `"host"`: use the host system's IPC namespace If not specified, daemon default is used, which can either be `"private"` or `"shareable"`, depending on daemon version and configuration. 
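The `Binds` string forms described above (`host-src:container-dest[:options]` and `volume-name:container-dest[:options]`) and the structured `Mounts` field can both be set on a `HostConfig` when creating a container. The sketch below is illustrative only, using the Go client with placeholder paths, volume names, the `busybox` image, and the container name `binds-example`; it assumes a local daemon reachable via the standard environment variables.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	hostConfig := &container.HostConfig{
		// host-src:container-dest[:options] and volume-name:container-dest[:options] forms.
		Binds: []string{
			"/tmp/app-logs:/var/log/app:rw",
			"config-volume:/etc/app:ro,nocopy",
		},
		// The structured Mounts field expresses the same information as the Binds strings.
		Mounts: []mount.Mount{
			{Type: mount.TypeVolume, Source: "data-volume", Target: "/data", ReadOnly: true},
		},
	}

	resp, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "busybox", Cmd: []string{"sleep", "3600"}},
		hostConfig,
		nil, // networking config
		nil, // platform
		"binds-example")
	if err != nil {
		panic(err)
	}
	fmt.Println("created container:", resp.ID)
}
```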
Cgroup: type: "string" description: "Cgroup to use for the container." Links: type: "array" description: | A list of links for the container in the form `container_name:alias`. items: type: "string" OomScoreAdj: type: "integer" description: | An integer value containing the score given to the container in order to tune OOM killer preferences. example: 500 PidMode: type: "string" description: | Set the PID (Process) Namespace mode for the container. It can be either: - `"container:<name|id>"`: joins another container's PID namespace - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" description: "Gives the container full access to the host." PublishAllPorts: type: "boolean" description: | Allocates an ephemeral host port for all of a container's exposed ports. Ports are de-allocated when the container stops and allocated when the container starts. The allocated port might be changed when restarting the container. The port is selected from the ephemeral port range that depends on the kernel. For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" description: "A list of string values to customize labels for MLS systems, such as SELinux." items: type: "string" StorageOpt: type: "object" description: | Storage driver options for this container, in the form `{"size": "120G"}`. additionalProperties: type: "string" Tmpfs: type: "object" description: | A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: ``` { "/run": "rw,noexec,nosuid,size=65536k" } ``` additionalProperties: type: "string" UTSMode: type: "string" description: "UTS namespace to use for the container." UsernsMode: type: "string" description: | Sets the usernamespace mode for the container when usernamespace remapping option is enabled. ShmSize: type: "integer" description: | Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. minimum: 0 Sysctls: type: "object" description: | A list of kernel parameters (sysctls) to set in the container. For example: ``` {"net.ipv4.ip_forward": "1"} ``` additionalProperties: type: "string" Runtime: type: "string" description: "Runtime to use with this container." # Applicable to Windows ConsoleSize: type: "array" description: | Initial console size, as an `[height, width]` array. (Windows only) minItems: 2 maxItems: 2 items: type: "integer" minimum: 0 Isolation: type: "string" description: | Isolation technology of the container. (Windows only) enum: - "default" - "process" - "hyperv" MaskedPaths: type: "array" description: | The list of paths to be masked inside the container (this overrides the default set of paths). items: type: "string" ReadonlyPaths: type: "array" description: | The list of paths to be set as read-only inside the container (this overrides the default set of paths). items: type: "string" ContainerConfig: description: "Configuration for a container that is portable between hosts" type: "object" properties: Hostname: description: "The hostname to use for the container, as a valid RFC 1123 hostname." type: "string" Domainname: description: "The domain name to use for the container." type: "string" User: description: "The user that commands are run as inside the container." type: "string" AttachStdin: description: "Whether to attach to `stdin`." 
type: "boolean" default: false AttachStdout: description: "Whether to attach to `stdout`." type: "boolean" default: true AttachStderr: description: "Whether to attach to `stderr`." type: "boolean" default: true ExposedPorts: description: | An object mapping ports to an empty object in the form: `{"<port>/<tcp|udp|sctp>": {}}` type: "object" additionalProperties: type: "object" enum: - {} default: {} Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. type: "boolean" default: false OpenStdin: description: "Open `stdin`" type: "boolean" default: false StdinOnce: description: "Close `stdin` after one attached client disconnects" type: "boolean" default: false Env: description: | A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. type: "array" items: type: "string" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" Image: description: | The name of the image to use when creating the container/ type: "string" Volumes: description: | An object mapping mount point paths inside the container to empty objects. type: "object" additionalProperties: type: "object" enum: - {} default: {} WorkingDir: description: "The working directory for commands to run in." type: "string" Entrypoint: description: | The entry point for the container as a string or an array of strings. If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). type: "array" items: type: "string" NetworkDisabled: description: "Disable networking for the container." type: "boolean" MacAddress: description: "MAC address of the container." type: "string" OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" items: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" default: "SIGTERM" StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" items: type: "string" NetworkingConfig: description: | NetworkingConfig represents the container's networking configuration for each of its interfaces. It is used for the networking configs specified in the `docker create` and `docker network connect` commands. type: "object" properties: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from # /definitions/EndpointSettings, because containers/create currently # does not support attaching to multiple networks, so the example request # would be confusing if it showed that multiple networks can be contained # in the EndpointsConfig. 
# TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: description: Name of the network's bridge (for example, `docker0`). type: "string" example: "docker0" SandboxID: description: SandboxID uniquely represents a container's network stack. type: "string" example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. type: "boolean" example: false LinkLocalIPv6Address: description: IPv6 unicast address using the link-local prefix. type: "string" example: "fe80::42:acff:fe11:1" LinkLocalIPv6PrefixLen: description: Prefix length of the IPv6 unicast address. type: "integer" example: "64" Ports: $ref: "#/definitions/PortMap" SandboxKey: description: SandboxKey identifies the sandbox type: "string" example: "/var/run/docker/netns/8ab54b426c38" # TODO is SecondaryIPAddresses actually used? SecondaryIPAddresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO is SecondaryIPv6Addresses actually used? SecondaryIPv6Addresses: description: "" type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO properties below are part of DefaultNetworkSettings, which is # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 EndpointID: description: | EndpointID uniquely represents a service endpoint in a Sandbox. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.1" GlobalIPv6Address: description: | Global IPv6 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. 
This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 64 IPAddress: description: | IPv4 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address for this network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8:2::100" MacAddress: description: | MAC address for the container on the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "02:42:ac:11:00:04" Networks: description: | Information about all networks that the container is connected to. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Address: description: Address represents an IPv4 or IPv6 IP address. type: "object" properties: Addr: description: IP address. type: "string" PrefixLen: description: Mask length of the IP address. type: "integer" PortMap: description: | PortMap describes the mapping of container ports to host ports, using the container's port-number and protocol as key in the format `<port>/<protocol>`, for example, `80/udp`. If a container's port is mapped for multiple protocols, separate entries are added to the mapping table. type: "object" additionalProperties: type: "array" x-nullable: true items: $ref: "#/definitions/PortBinding" example: "443/tcp": - HostIp: "127.0.0.1" HostPort: "4443" "80/tcp": - HostIp: "0.0.0.0" HostPort: "80" - HostIp: "0.0.0.0" HostPort: "8080" "80/udp": - HostIp: "0.0.0.0" HostPort: "80" "53/udp": - HostIp: "0.0.0.0" HostPort: "53" "2377/tcp": null PortBinding: description: | PortBinding represents a binding between a host IP address and a host port. type: "object" properties: HostIp: description: "Host IP address that the container's port is mapped to." type: "string" example: "127.0.0.1" HostPort: description: "Host port number that the container's port is mapped to." type: "string" example: "4443" GraphDriverData: description: "Information about a container's graph driver." 
type: "object" required: [Name, Data] properties: Name: type: "string" x-nullable: false Data: type: "object" x-nullable: false additionalProperties: type: "string" Image: type: "object" required: - Id - Parent - Comment - Created - Container - DockerVersion - Author - Architecture - Os - Size - VirtualSize - GraphDriver - RootFS properties: Id: type: "string" x-nullable: false RepoTags: type: "array" items: type: "string" RepoDigests: type: "array" items: type: "string" Parent: type: "string" x-nullable: false Comment: type: "string" x-nullable: false Created: type: "string" x-nullable: false Container: type: "string" x-nullable: false ContainerConfig: $ref: "#/definitions/ContainerConfig" DockerVersion: type: "string" x-nullable: false Author: type: "string" x-nullable: false Config: $ref: "#/definitions/ContainerConfig" Architecture: type: "string" x-nullable: false Os: type: "string" x-nullable: false OsVersion: type: "string" Size: type: "integer" format: "int64" x-nullable: false VirtualSize: type: "integer" format: "int64" x-nullable: false GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: type: "object" required: [Type] properties: Type: type: "string" x-nullable: false Layers: type: "array" items: type: "string" BaseLayer: type: "string" Metadata: type: "object" properties: LastTagTime: type: "string" format: "dateTime" ImageSummary: type: "object" required: - Id - ParentId - RepoTags - RepoDigests - Created - Size - SharedSize - VirtualSize - Labels - Containers properties: Id: type: "string" x-nullable: false ParentId: type: "string" x-nullable: false RepoTags: type: "array" x-nullable: false items: type: "string" RepoDigests: type: "array" x-nullable: false items: type: "string" Created: type: "integer" x-nullable: false Size: type: "integer" x-nullable: false SharedSize: type: "integer" x-nullable: false VirtualSize: type: "integer" x-nullable: false Labels: type: "object" x-nullable: false additionalProperties: type: "string" Containers: x-nullable: false type: "integer" AuthConfig: type: "object" properties: username: type: "string" password: type: "string" email: type: "string" serveraddress: type: "string" example: username: "hannibal" password: "xxxx" serveraddress: "https://index.docker.io/v1/" ProcessConfig: type: "object" properties: privileged: type: "boolean" user: type: "string" tty: type: "boolean" entrypoint: type: "string" arguments: type: "array" items: type: "string" Volume: type: "object" required: [Name, Driver, Mountpoint, Labels, Scope, Options] properties: Name: type: "string" description: "Name of the volume." x-nullable: false Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false Mountpoint: type: "string" description: "Mount path of the volume on the host." x-nullable: false CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." Status: type: "object" description: | Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature. additionalProperties: type: "object" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" Scope: type: "string" description: | The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. 
default: "local" x-nullable: false enum: ["local", "global"] Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" UsageData: type: "object" x-nullable: true required: [Size, RefCount] description: | Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. properties: Size: type: "integer" default: -1 description: | Amount of disk space used by the volume (in bytes). This information is only available for volumes created with the `"local"` volume driver. For volumes created with other volume drivers, this field is set to `-1` ("not available") x-nullable: false RefCount: type: "integer" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. x-nullable: false example: Name: "tardis" Driver: "custom" Mountpoint: "/var/lib/docker/volumes/tardis" Status: hello: "world" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" CreatedAt: "2016-06-07T20:31:11.853781916Z" Network: type: "object" properties: Name: type: "string" Id: type: "string" Created: type: "string" format: "dateTime" Scope: type: "string" Driver: type: "string" EnableIPv6: type: "boolean" IPAM: $ref: "#/definitions/IPAM" Internal: type: "boolean" Attachable: type: "boolean" Ingress: type: "boolean" Containers: type: "object" additionalProperties: $ref: "#/definitions/NetworkContainer" Options: type: "object" additionalProperties: type: "string" Labels: type: "object" additionalProperties: type: "string" example: Name: "net01" Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" Created: "2016-10-19T04:33:30.360899459Z" Scope: "local" Driver: "bridge" EnableIPv6: false IPAM: Driver: "default" Config: - Subnet: "172.19.0.0/16" Gateway: "172.19.0.1" Options: foo: "bar" Internal: false Attachable: false Ingress: false Containers: 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: Name: "test" EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: "02:42:ac:13:00:02" IPv4Address: "172.19.0.2/16" IPv6Address: "" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" IPAM: type: "object" properties: Driver: description: "Name of the IPAM driver to use." type: "string" default: "default" Config: description: | List of IPAM configuration options, specified as a map: ``` {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} ``` type: "array" items: type: "object" additionalProperties: type: "string" Options: description: "Driver-specific options, specified as a map." 
type: "object" additionalProperties: type: "string" NetworkContainer: type: "object" properties: Name: type: "string" EndpointID: type: "string" MacAddress: type: "string" IPv4Address: type: "string" IPv6Address: type: "string" BuildInfo: type: "object" properties: id: type: "string" stream: type: "string" error: type: "string" errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" aux: $ref: "#/definitions/ImageID" BuildCache: type: "object" properties: ID: type: "string" Parent: type: "string" Type: type: "string" Description: type: "string" InUse: type: "boolean" Shared: type: "boolean" Size: description: | Amount of disk space used by the build cache (in bytes). type: "integer" CreatedAt: description: | Date and time at which the build cache was created in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" LastUsedAt: description: | Date and time at which the build cache was last used in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" x-nullable: true example: "2017-08-09T07:09:37.632105588Z" UsageCount: type: "integer" ImageID: type: "object" description: "Image ID or Digest" properties: ID: type: "string" example: ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" CreateImageInfo: type: "object" properties: id: type: "string" error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" PushImageInfo: type: "object" properties: error: type: "string" status: type: "string" progress: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" ErrorDetail: type: "object" properties: code: type: "integer" message: type: "string" ProgressDetail: type: "object" properties: current: type: "integer" total: type: "integer" ErrorResponse: description: "Represents an error." type: "object" required: ["message"] properties: message: description: "The error message." type: "string" x-nullable: false example: message: "Something went wrong." IdResponse: description: "Response to an API call that returns just an Id" type: "object" required: ["Id"] properties: Id: description: "The id of the newly created object." type: "string" x-nullable: false EndpointSettings: description: "Configuration for a network endpoint." type: "object" properties: # Configurations IPAMConfig: $ref: "#/definitions/EndpointIPAMConfig" Links: type: "array" items: type: "string" example: - "container_1" - "container_2" Aliases: type: "array" items: type: "string" example: - "server_x" - "server_y" # Operational data NetworkID: description: | Unique ID of the network. type: "string" example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" EndpointID: description: | Unique ID for the service endpoint in a Sandbox. type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for this network. type: "string" example: "172.17.0.1" IPAddress: description: | IPv4 address. type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address. type: "string" example: "2001:db8:2::100" GlobalIPv6Address: description: | Global IPv6 address. 
type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. type: "integer" format: "int64" example: 64 MacAddress: description: | MAC address for the endpoint on this network. type: "string" example: "02:42:ac:11:00:04" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" x-nullable: true additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" EndpointIPAMConfig: description: | EndpointIPAMConfig represents an endpoint's IPAM configuration. type: "object" x-nullable: true properties: IPv4Address: type: "string" example: "172.20.30.33" IPv6Address: type: "string" example: "2001:db8:abcd::3033" LinkLocalIPs: type: "array" items: type: "string" example: - "169.254.34.68" - "fe80::3468" PluginMount: type: "object" x-nullable: false required: [Name, Description, Settable, Source, Destination, Type, Options] properties: Name: type: "string" x-nullable: false example: "some-mount" Description: type: "string" x-nullable: false example: "This is a mount that's used by the plugin." Settable: type: "array" items: type: "string" Source: type: "string" example: "/var/lib/docker/plugins/" Destination: type: "string" x-nullable: false example: "/mnt/state" Type: type: "string" x-nullable: false example: "bind" Options: type: "array" items: type: "string" example: - "rbind" - "rw" PluginDevice: type: "object" required: [Name, Description, Settable, Path] x-nullable: false properties: Name: type: "string" x-nullable: false Description: type: "string" x-nullable: false Settable: type: "array" items: type: "string" Path: type: "string" example: "/dev/fuse" PluginEnv: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" Description: x-nullable: false type: "string" Settable: type: "array" items: type: "string" Value: type: "string" PluginInterfaceType: type: "object" x-nullable: false required: [Prefix, Capability, Version] properties: Prefix: type: "string" x-nullable: false Capability: type: "string" x-nullable: false Version: type: "string" x-nullable: false PluginPrivilege: description: | Describes a permission the user has to accept upon installing the plugin. type: "object" x-go-name: "PluginPrivilege" properties: Name: type: "string" example: "network" Description: type: "string" Value: type: "array" items: type: "string" example: - "host" Plugin: description: "A plugin for the Engine API" type: "object" required: [Settings, Enabled, Config, Name] properties: Id: type: "string" example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" Name: type: "string" x-nullable: false example: "tiborvass/sample-volume-plugin" Enabled: description: True if the plugin is running. False if the plugin is not running, only installed. type: "boolean" x-nullable: false example: true Settings: description: "Settings that can be modified by users." 
type: "object" x-nullable: false required: [Args, Devices, Env, Mounts] properties: Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: type: "string" example: - "DEBUG=0" Args: type: "array" items: type: "string" Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PluginReference: description: "plugin remote reference used to push/pull the plugin" type: "string" x-nullable: false example: "localhost:5000/tiborvass/sample-volume-plugin:latest" Config: description: "The config of a plugin." type: "object" x-nullable: false required: - Description - Documentation - Interface - Entrypoint - WorkDir - Network - Linux - PidHost - PropagatedMount - IpcHost - Mounts - Env - Args properties: DockerVersion: description: "Docker Version used to create the plugin" type: "string" x-nullable: false example: "17.06.0-ce" Description: type: "string" x-nullable: false example: "A sample volume plugin for Docker" Documentation: type: "string" x-nullable: false example: "https://docs.docker.com/engine/extend/plugins/" Interface: description: "The interface between Docker and the plugin" x-nullable: false type: "object" required: [Types, Socket] properties: Types: type: "array" items: $ref: "#/definitions/PluginInterfaceType" example: - "docker.volumedriver/1.0" Socket: type: "string" x-nullable: false example: "plugins.sock" ProtocolScheme: type: "string" example: "some.protocol/v1.0" description: "Protocol to use for clients connecting to the plugin." enum: - "" - "moby.plugins.http/v1" Entrypoint: type: "array" items: type: "string" example: - "/usr/bin/sample-volume-plugin" - "/data" WorkDir: type: "string" x-nullable: false example: "/bin/" User: type: "object" x-nullable: false properties: UID: type: "integer" format: "uint32" example: 1000 GID: type: "integer" format: "uint32" example: 1000 Network: type: "object" x-nullable: false required: [Type] properties: Type: x-nullable: false type: "string" example: "host" Linux: type: "object" x-nullable: false required: [Capabilities, AllowAllDevices, Devices] properties: Capabilities: type: "array" items: type: "string" example: - "CAP_SYS_ADMIN" - "CAP_SYSLOG" AllowAllDevices: type: "boolean" x-nullable: false example: false Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PropagatedMount: type: "string" x-nullable: false example: "/mnt/volumes" IpcHost: type: "boolean" x-nullable: false example: false PidHost: type: "boolean" x-nullable: false example: false Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: $ref: "#/definitions/PluginEnv" example: - Name: "DEBUG" Description: "If set, prints debug messages" Settable: null Value: "0" Args: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" example: "args" Description: x-nullable: false type: "string" example: "command line arguments" Settable: type: "array" items: type: "string" Value: type: "array" items: type: "string" rootfs: type: "object" properties: type: type: "string" example: "layers" diff_ids: type: "array" items: type: "string" example: - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ObjectVersion: description: | The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. 
The client must send the version number along with the modified specification when updating these objects. This approach ensures safe concurrency and determinism in that the change on the object may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not unintentionally overwrite each other. type: "object" properties: Index: type: "integer" format: "uint64" example: 373531 NodeSpec: type: "object" properties: Name: description: "Name for the node." type: "string" example: "my-node" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Role: description: "Role of the node." type: "string" enum: - "worker" - "manager" example: "manager" Availability: description: "Availability of the node." type: "string" enum: - "active" - "pause" - "drain" example: "active" example: Availability: "active" Name: "node-name" Role: "manager" Labels: foo: "bar" Node: type: "object" properties: ID: type: "string" example: "24ifsmvkjbyhk" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the node was added to the swarm in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the node was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/NodeSpec" Description: $ref: "#/definitions/NodeDescription" Status: $ref: "#/definitions/NodeStatus" ManagerStatus: $ref: "#/definitions/ManagerStatus" NodeDescription: description: | NodeDescription encapsulates the properties of the Node as reported by the agent. type: "object" properties: Hostname: type: "string" example: "bf3067039e47" Platform: $ref: "#/definitions/Platform" Resources: $ref: "#/definitions/ResourceObject" Engine: $ref: "#/definitions/EngineDescription" TLSInfo: $ref: "#/definitions/TLSInfo" Platform: description: | Platform represents the platform (Arch/OS). type: "object" properties: Architecture: description: | Architecture represents the hardware architecture (for example, `x86_64`). type: "string" example: "x86_64" OS: description: | OS represents the Operating System (for example, `linux` or `windows`). type: "string" example: "linux" EngineDescription: description: "EngineDescription provides information about an engine." 
type: "object" properties: EngineVersion: type: "string" example: "17.06.0" Labels: type: "object" additionalProperties: type: "string" example: foo: "bar" Plugins: type: "array" items: type: "object" properties: Type: type: "string" Name: type: "string" example: - Type: "Log" Name: "awslogs" - Type: "Log" Name: "fluentd" - Type: "Log" Name: "gcplogs" - Type: "Log" Name: "gelf" - Type: "Log" Name: "journald" - Type: "Log" Name: "json-file" - Type: "Log" Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" Name: "syslog" - Type: "Network" Name: "bridge" - Type: "Network" Name: "host" - Type: "Network" Name: "ipvlan" - Type: "Network" Name: "macvlan" - Type: "Network" Name: "null" - Type: "Network" Name: "overlay" - Type: "Volume" Name: "local" - Type: "Volume" Name: "localhost:5000/vieux/sshfs:latest" - Type: "Volume" Name: "vieux/sshfs:latest" TLSInfo: description: | Information about the issuer of leaf TLS certificates and the trusted root CA certificate. type: "object" properties: TrustRoot: description: | The root CA certificate(s) that are used to validate leaf TLS certificates. type: "string" CertIssuerSubject: description: The base64-url-safe-encoded raw subject bytes of the issuer. type: "string" CertIssuerPublicKey: description: | The base64-url-safe-encoded raw public key bytes of the issuer. type: "string" example: TrustRoot: | -----BEGIN CERTIFICATE----- MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H -----END CERTIFICATE----- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" NodeStatus: description: | NodeStatus represents the status of a node. It provides the current status of the node, as seen by the manager. type: "object" properties: State: $ref: "#/definitions/NodeState" Message: type: "string" example: "" Addr: description: "IP address of the node." type: "string" example: "172.17.0.2" NodeState: description: "NodeState represents the state of a node." type: "string" enum: - "unknown" - "down" - "ready" - "disconnected" example: "ready" ManagerStatus: description: | ManagerStatus represents the status of a manager. It provides the current status of a node's manager component, if the node is a manager. x-nullable: true type: "object" properties: Leader: type: "boolean" default: false example: true Reachability: $ref: "#/definitions/Reachability" Addr: description: | The IP address and port at which the manager is reachable. type: "string" example: "10.0.0.46:2377" Reachability: description: "Reachability represents the reachability of a node." type: "string" enum: - "unknown" - "unreachable" - "reachable" example: "reachable" SwarmSpec: description: "User modifiable swarm configuration." type: "object" properties: Name: description: "Name of the swarm." type: "string" example: "default" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: com.example.corp.type: "production" com.example.corp.department: "engineering" Orchestration: description: "Orchestration configuration." type: "object" x-nullable: true properties: TaskHistoryRetentionLimit: description: | The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks. type: "integer" format: "int64" example: 10 Raft: description: "Raft configuration." type: "object" properties: SnapshotInterval: description: "The number of log entries between snapshots." type: "integer" format: "uint64" example: 10000 KeepOldSnapshots: description: | The number of snapshots to keep beyond the current snapshot. type: "integer" format: "uint64" LogEntriesForSlowFollowers: description: | The number of log entries to keep around to sync up slow followers after a snapshot is created. type: "integer" format: "uint64" example: 500 ElectionTick: description: | The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 3 HeartbeatTick: description: | The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 1 Dispatcher: description: "Dispatcher configuration." type: "object" x-nullable: true properties: HeartbeatPeriod: description: | The delay for an agent to send a heartbeat to the dispatcher. type: "integer" format: "int64" example: 5000000000 CAConfig: description: "CA configuration." type: "object" x-nullable: true properties: NodeCertExpiry: description: "The duration node certificates are issued for." type: "integer" format: "int64" example: 7776000000000000 ExternalCAs: description: | Configuration for forwarding signing requests to an external certificate authority. type: "array" items: type: "object" properties: Protocol: description: | Protocol for communication with the external CA (currently only `cfssl` is supported). type: "string" enum: - "cfssl" default: "cfssl" URL: description: | URL where certificate signing requests should be sent. type: "string" Options: description: | An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver. type: "object" additionalProperties: type: "string" CACert: description: | The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided). type: "string" SigningCACert: description: | The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format. type: "string" SigningCAKey: description: | The desired signing CA key for all swarm node TLS leaf certificates, in PEM format. type: "string" ForceRotate: description: | An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey` format: "uint64" type: "integer" EncryptionConfig: description: "Parameters related to encryption-at-rest." type: "object" properties: AutoLockManagers: description: | If set, generate a key and use it to lock data stored on the managers. 
type: "boolean" example: false TaskDefaults: description: "Defaults for creating tasks in this cluster." type: "object" properties: LogDriver: description: | The log driver to use for tasks created in the orchestrator if unspecified by a service. Updating this value only affects new tasks. Existing tasks continue to use their previously configured log driver until recreated. type: "object" properties: Name: description: | The log driver to use as a default for new tasks. type: "string" example: "json-file" Options: description: | Driver-specific options for the selectd log driver, specified as key/value pairs. type: "object" additionalProperties: type: "string" example: "max-file": "10" "max-size": "100m" # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but # without `JoinTokens`. ClusterInfo: description: | ClusterInfo represents information about the swarm as is returned by the "/info" endpoint. Join-tokens are not included. x-nullable: true type: "object" properties: ID: description: "The ID of the swarm." type: "string" example: "abajmipo7b4xz5ip2nrla6b11" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the swarm was initialised in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the swarm was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/SwarmSpec" TLSInfo: $ref: "#/definitions/TLSInfo" RootRotationInProgress: description: | Whether there is currently a root CA rotation in progress for the swarm type: "boolean" example: false DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. If no port is set or is set to 0, the default port (4789) is used. type: "integer" format: "uint32" default: 4789 example: 4789 DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" format: "CIDR" example: ["10.10.0.0/16", "20.20.0.0/16"] SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. type: "integer" format: "uint32" maximum: 29 default: 24 example: 24 JoinTokens: description: | JoinTokens contains the tokens workers and managers need to join the swarm. type: "object" properties: Worker: description: | The token workers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" Manager: description: | The token managers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" Swarm: type: "object" allOf: - $ref: "#/definitions/ClusterInfo" - type: "object" properties: JoinTokens: $ref: "#/definitions/JoinTokens" TaskSpec: description: "User modifiable task configuration." type: "object" properties: PluginSpec: type: "object" description: | Plugin spec for the service. *(Experimental release only.)* <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. 
properties: Name: description: "The name or 'alias' to use for the plugin." type: "string" Remote: description: "The plugin image reference to use." type: "string" Disabled: description: "Disable the plugin once scheduled." type: "boolean" PluginPrivilege: type: "array" items: $ref: "#/definitions/PluginPrivilege" ContainerSpec: type: "object" description: | Container spec for the service. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. properties: Image: description: "The image name to use for the container" type: "string" Labels: description: "User-defined key/value data." type: "object" additionalProperties: type: "string" Command: description: "The command to be run in the image." type: "array" items: type: "string" Args: description: "Arguments to the command." type: "array" items: type: "string" Hostname: description: | The hostname to use for the container, as a valid [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. type: "string" Env: description: | A list of environment variables in the form `VAR=value`. type: "array" items: type: "string" Dir: description: "The working directory for commands to run in." type: "string" User: description: "The user inside the container." type: "string" Groups: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" Privileges: type: "object" description: "Security options for the container" properties: CredentialSpec: type: "object" description: "CredentialSpec for managed service account (Windows only)" properties: Config: type: "string" example: "0bt9dmxjvjiqermk6xrop3ekq" description: | Load credential spec from a Swarm Config with the given ID. The specified config must also be present in the Configs field with the Runtime property set. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. File: type: "string" example: "spec.json" description: | Load credential spec from this file. The file is read by the daemon, and must be present in the `CredentialSpecs` subdirectory in the docker data directory, which defaults to `C:\ProgramData\Docker\` on Windows. For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. Registry: type: "string" description: | Load credential spec from this value in the Windows registry. The specified registry value must be located in: `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" properties: Disable: type: "boolean" description: "Disable SELinux" User: type: "string" description: "SELinux user label" Role: type: "string" description: "SELinux role label" Type: type: "string" description: "SELinux type label" Level: type: "string" description: "SELinux level label" TTY: description: "Whether a pseudo-TTY should be allocated." 
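        # Illustrative note on CredentialSpec above (not part of the schema):
        # `CredentialSpec.File`, `CredentialSpec.Registry`, and
        # `CredentialSpec.Config` are mutually exclusive, so a ContainerSpec
        # sets at most one of them. A minimal sketch of a Windows ContainerSpec
        # fragment that loads the `spec.json` example shown above (the image
        # name is illustrative):
        #
        #   "ContainerSpec": {
        #     "Image": "mcr.microsoft.com/windows/servercore:ltsc2019",
        #     "Privileges": {
        #       "CredentialSpec": {"File": "spec.json"}
        #     }
        #   }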
type: "boolean" OpenStdin: description: "Open `stdin`" type: "boolean" ReadOnly: description: "Mount the container's root filesystem as read only." type: "boolean" Mounts: description: | Specification for mounts to be added to containers created as part of the service. type: "array" items: $ref: "#/definitions/Mount" StopSignal: description: "Signal to stop the container." type: "string" StopGracePeriod: description: | Amount of time to wait for the container to terminate before forcefully killing it. type: "integer" format: "int64" HealthCheck: $ref: "#/definitions/HealthConfig" Hosts: type: "array" description: | A list of hostname/IP mappings to add to the container's `hosts` file. The format of extra hosts is specified in the [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) man page: IP_address canonical_hostname [aliases...] items: type: "string" DNSConfig: description: | Specification for DNS related configurations in resolver configuration file (`resolv.conf`). type: "object" properties: Nameservers: description: "The IP addresses of the name servers." type: "array" items: type: "string" Search: description: "A search list for host-name lookup." type: "array" items: type: "string" Options: description: | A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.). type: "array" items: type: "string" Secrets: description: | Secrets contains references to zero or more secrets that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" SecretID: description: | SecretID represents the ID of the specific secret that we're referencing. type: "string" SecretName: description: | SecretName is the name of the secret that this references, but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID. type: "string" Configs: description: | Configs contains references to zero or more configs that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" Runtime: description: | Runtime represents a target that is not mounted into the container but is used by the task <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually > exclusive type: "object" ConfigID: description: | ConfigID represents the ID of the specific config that we're referencing. type: "string" ConfigName: description: | ConfigName is the name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID. 
type: "string" Isolation: type: "string" description: | Isolation technology of the containers running the service. (Windows only) enum: - "default" - "process" - "hyperv" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true Sysctls: description: | Set kernel namedspaced parameters (sysctls) in the container. The Sysctls option on services accepts the same sysctls as the are supported on containers. Note that while the same sysctls are supported, no guarantees or checks are made about their suitability for a clustered environment, and it's up to the user to determine whether a given sysctl will work properly in a Service. type: "object" additionalProperties: type: "string" # This option is not used by Windows containers CapabilityAdd: type: "array" description: | A list of kernel capabilities to add to the default set for the container. items: type: "string" example: - "CAP_NET_RAW" - "CAP_SYS_ADMIN" - "CAP_SYS_CHROOT" - "CAP_SYSLOG" CapabilityDrop: type: "array" description: | A list of kernel capabilities to drop from the default set for the container. items: type: "string" example: - "CAP_NET_RAW" Ulimits: description: | A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay networks. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. type: "object" properties: ContainerID: description: "ID of the container represented by this task" type: "string" Resources: description: | Resource requirements which apply to each individual container created as part of the service. type: "object" properties: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" Reservation: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: description: | Specification for the restart policy which applies to containers created as part of this service. type: "object" properties: Condition: description: "Condition for restart." type: "string" enum: - "none" - "on-failure" - "any" Delay: description: "Delay between restart attempts." type: "integer" format: "int64" MaxAttempts: description: | Maximum attempts to restart a given container before giving up (default value is 0, which is ignored). type: "integer" format: "int64" default: 0 Window: description: | Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded). type: "integer" format: "int64" default: 0 Placement: type: "object" properties: Constraints: description: | An array of constraint expressions to limit the set of nodes where a task can be scheduled. Constraint expressions can either use a _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find nodes that satisfy every expression (AND match). 
Constraints can match node or Docker Engine labels as follows: node attribute | matches | example ---------------------|--------------------------------|----------------------------------------------- `node.id` | Node ID | `node.id==2ivku8v2gvtg4` `node.hostname` | Node hostname | `node.hostname!=node-2` `node.role` | Node role (`manager`/`worker`) | `node.role==manager` `node.platform.os` | Node operating system | `node.platform.os==windows` `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` `node.labels` | User-defined node labels | `node.labels.security==high` `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` `engine.labels` apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add `node.labels` for operational purposes by using the [`node update endpoint`](#operation/NodeUpdate). type: "array" items: type: "string" example: - "node.hostname!=node3.corp.example.com" - "node.role!=manager" - "node.labels.type==production" - "node.platform.os==linux" - "node.platform.arch==x86_64" Preferences: description: | Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence. type: "array" items: type: "object" properties: Spread: type: "object" properties: SpreadDescriptor: description: | label descriptor, such as `engine.labels.az`. type: "string" example: - Spread: SpreadDescriptor: "node.labels.datacenter" - Spread: SpreadDescriptor: "node.labels.rack" MaxReplicas: description: | Maximum number of replicas for per node (default value is 0, which is unlimited) type: "integer" format: "int64" default: 0 Platforms: description: | Platforms stores all the platforms that the service's image can run on. This field is used in the platform filter for scheduling. If empty, then the platform filter is off, meaning there are no scheduling restrictions. type: "array" items: $ref: "#/definitions/Platform" ForceUpdate: description: | A counter that triggers an update even if no relevant parameters have been changed. type: "integer" Runtime: description: | Runtime is the type of runtime specified for the task executor. type: "string" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" LogDriver: description: | Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified. type: "object" properties: Name: type: "string" Options: type: "object" additionalProperties: type: "string" TaskState: type: "string" enum: - "new" - "allocated" - "pending" - "assigned" - "accepted" - "preparing" - "ready" - "starting" - "running" - "complete" - "shutdown" - "failed" - "rejected" - "remove" - "orphaned" Task: type: "object" properties: ID: description: "The ID of the task." type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Name: description: "Name of the task." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Spec: $ref: "#/definitions/TaskSpec" ServiceID: description: "The ID of the service this task is part of." type: "string" Slot: type: "integer" NodeID: description: "The ID of the node that this task is on." 
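        # Illustrative note on TaskSpec above (not part of the schema): a
        # TaskSpec is normally submitted as the TaskTemplate of a ServiceSpec.
        # A minimal sketch of a `POST /services/create` body that combines a
        # ContainerSpec with a placement constraint and a replicated mode (the
        # service name and image are illustrative):
        #
        #   {
        #     "Name": "web",
        #     "TaskTemplate": {
        #       "ContainerSpec": {"Image": "nginx:alpine"},
        #       "Placement": {"Constraints": ["node.role==worker"]}
        #     },
        #     "Mode": {"Replicated": {"Replicas": 3}}
        #   }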
type: "string" AssignedGenericResources: $ref: "#/definitions/GenericResources" Status: type: "object" properties: Timestamp: type: "string" format: "dateTime" State: $ref: "#/definitions/TaskState" Message: type: "string" Err: type: "string" ContainerStatus: type: "object" properties: ContainerID: type: "string" PID: type: "integer" ExitCode: type: "integer" DesiredState: $ref: "#/definitions/TaskState" JobIteration: description: | If the Service this Task belongs to is a job-mode service, contains the JobIteration of the Service this Task was created for. Absent if the Task was created for a Replicated or Global Service. $ref: "#/definitions/ObjectVersion" example: ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" AssignedGenericResources: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" ServiceSpec: description: "User modifiable configuration for a service." properties: Name: description: "Name of the service." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" TaskTemplate: $ref: "#/definitions/TaskSpec" Mode: description: "Scheduling mode for the service." type: "object" properties: Replicated: type: "object" properties: Replicas: type: "integer" format: "int64" Global: type: "object" ReplicatedJob: description: | The mode used for services with a finite number of tasks that run to a completed state. type: "object" properties: MaxConcurrent: description: | The maximum number of replicas to run simultaneously. type: "integer" format: "int64" default: 1 TotalCompletions: description: | The total number of replicas desired to reach the Completed state. If unset, will default to the value of `MaxConcurrent` type: "integer" format: "int64" GlobalJob: description: | The mode used for services which run a task to the completed state on each valid node. type: "object" UpdateConfig: description: "Specification for the update strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: "Amount of time between updates, in nanoseconds." type: "integer" format: "int64" FailureAction: description: | Action to take if an updated task fails to run, or stops running during the update. 
type: "string" enum: - "continue" - "pause" - "rollback" Monitor: description: | Amount of time to monitor each updated task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" RollbackConfig: description: "Specification for the rollback strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: | Amount of time between rollback iterations, in nanoseconds. type: "integer" format: "int64" FailureAction: description: | Action to take if an rolled back task fails to run, or stops running during the rollback. type: "string" enum: - "continue" - "pause" Monitor: description: | Amount of time to monitor each rolled back task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" EndpointSpec: $ref: "#/definitions/EndpointSpec" EndpointPortConfig: type: "object" properties: Name: type: "string" Protocol: type: "string" enum: - "tcp" - "udp" - "sctp" TargetPort: description: "The port inside the container." type: "integer" PublishedPort: description: "The port on the swarm hosts." type: "integer" PublishMode: description: | The mode in which port is published. <p><br /></p> - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publish the port directly on the swarm node where that service is running. type: "string" enum: - "ingress" - "host" default: "ingress" example: "ingress" EndpointSpec: description: "Properties that can be configured to access and load balance a service." type: "object" properties: Mode: description: | The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" - "dnsrr" default: "vip" Ports: description: | List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used. 
type: "array" items: $ref: "#/definitions/EndpointPortConfig" Service: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ServiceSpec" Endpoint: type: "object" properties: Spec: $ref: "#/definitions/EndpointSpec" Ports: type: "array" items: $ref: "#/definitions/EndpointPortConfig" VirtualIPs: type: "array" items: type: "object" properties: NetworkID: type: "string" Addr: type: "string" UpdateStatus: description: "The status of a service update." type: "object" properties: State: type: "string" enum: - "updating" - "paused" - "completed" StartedAt: type: "string" format: "dateTime" CompletedAt: type: "string" format: "dateTime" Message: type: "string" ServiceStatus: description: | The status of the service's tasks. Provided only when requested as part of a ServiceList operation. type: "object" properties: RunningTasks: description: | The number of tasks for the service currently in the Running state. type: "integer" format: "uint64" example: 7 DesiredTasks: description: | The number of tasks for the service desired to be running. For replicated services, this is the replica count from the service spec. For global services, this is computed by taking count of all tasks for the service with a Desired State other than Shutdown. type: "integer" format: "uint64" example: 10 CompletedTasks: description: | The number of tasks for a job that are in the Completed state. This field must be cross-referenced with the service type, as the value of 0 may mean the service is not in a job mode, or it may mean the job-mode service has no tasks yet Completed. type: "integer" format: "uint64" JobStatus: description: | The status of the service when it is in one of ReplicatedJob or GlobalJob modes. Absent on Replicated and Global mode services. The JobIteration is an ObjectVersion, but unlike the Service's version, does not need to be sent with an update request. type: "object" properties: JobIteration: description: | JobIteration is a value increased each time a Job is executed, successfully or otherwise. "Executed", in this case, means the job as a whole has been started, not that an individual Task has been launched. A job is "Executed" when its ServiceSpec is updated. JobIteration can be used to disambiguate Tasks belonging to different executions of a job. Though JobIteration will increase with each subsequent execution, it may not necessarily increase by 1, and so JobIteration should not be used to $ref: "#/definitions/ObjectVersion" LastExecution: description: | The last time, as observed by the server, that this job was started. 
type: "string" format: "dateTime" example: ID: "9mnpnzenvg8p8tdbtq4wvbkcz" Version: Index: 19 CreatedAt: "2016-06-07T21:05:51.880065305Z" UpdatedAt: "2016-06-07T21:07:29.962229872Z" Spec: Name: "hopeful_cori" TaskTemplate: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Endpoint: Spec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 VirtualIPs: - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.2/16" - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.3/16" ImageDeleteResponseItem: type: "object" properties: Untagged: description: "The image ID of an image that was untagged" type: "string" Deleted: description: "The image ID of an image that was deleted" type: "string" ServiceUpdateResponse: type: "object" properties: Warnings: description: "Optional warning messages" type: "array" items: type: "string" example: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: type: "object" properties: Id: description: "The ID of this container" type: "string" x-go-name: "ID" Names: description: "The names that this container has been given" type: "array" items: type: "string" Image: description: "The name of the image used when creating this container" type: "string" ImageID: description: "The ID of the image that this container was created from" type: "string" Command: description: "Command to run when starting the container" type: "string" Created: description: "When the container was created" type: "integer" format: "int64" Ports: description: "The ports exposed by this container" type: "array" items: $ref: "#/definitions/Port" SizeRw: description: "The size of files that have been created or changed by this container" type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container" type: "integer" format: "int64" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" State: description: "The state of this container (e.g. `Exited`)" type: "string" Status: description: "Additional human-readable status of this container (e.g. `Exit 0`)" type: "string" HostConfig: type: "object" properties: NetworkMode: type: "string" NetworkSettings: description: "A summary of the container's network settings" type: "object" properties: Networks: type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Mounts: type: "array" items: $ref: "#/definitions/Mount" Driver: description: "Driver represents a driver (network, logging, secrets)." type: "object" required: [Name] properties: Name: description: "Name of the driver." type: "string" x-nullable: false example: "some-driver" Options: description: "Key/value map of driver-specific options." type: "object" x-nullable: false additionalProperties: type: "string" example: OptionA: "value for driver-specific option A" OptionB: "value for driver-specific option B" SecretSpec: type: "object" properties: Name: description: "User-defined name of the secret." 
type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) data to store as secret. This field is only used to _create_ a secret, and is not returned by other endpoints. type: "string" example: "" Driver: description: | Name of the secrets driver used to fetch the secret's value from an external secret store. $ref: "#/definitions/Driver" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Secret: type: "object" properties: ID: type: "string" example: "blt1owaxmitz71s9v5zh81zun" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" UpdatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" Spec: $ref: "#/definitions/SecretSpec" ConfigSpec: type: "object" properties: Name: description: "User-defined name of the config." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Data: description: | Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) config data. type: "string" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Config: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ConfigSpec" ContainerState: description: | ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" properties: Status: description: | String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead". type: "string" enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] example: "running" Running: description: | Whether this container is running. Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. Use the `Status` field instead to determine if a container's state is "running". type: "boolean" example: true Paused: description: "Whether this container is paused." type: "boolean" example: false Restarting: description: "Whether this container is restarting." type: "boolean" example: false OOMKilled: description: | Whether this container has been killed because it ran out of memory. type: "boolean" example: false Dead: type: "boolean" example: false Pid: description: "The process ID of this container" type: "integer" example: 1234 ExitCode: description: "The last exit code of this container" type: "integer" example: 0 Error: type: "string" StartedAt: description: "The time when this container was last started." 
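        # Illustrative note on SecretSpec above (not part of the schema):
        # `Data` must be base64-url-safe-encoded and is only accepted when
        # creating a secret; it is never returned by read endpoints. A minimal
        # sketch of a `POST /secrets/create` body (the name and payload are
        # hypothetical):
        #
        #   {
        #     "Name": "db-password",
        #     "Labels": {"com.example.some-label": "some-value"},
        #     "Data": "c3dvcmRmaXNo"
        #   }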
type: "string" example: "2020-01-06T09:06:59.461876391Z" FinishedAt: description: "The time when this container last exited." type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: x-nullable: true $ref: "#/definitions/Health" SystemVersion: type: "object" description: | Response of Engine API: GET "/version" properties: Platform: type: "object" required: [Name] properties: Name: type: "string" Components: type: "array" description: | Information about system components items: type: "object" x-go-name: ComponentVersion required: [Name, Version] properties: Name: description: | Name of the component type: "string" example: "Engine" Version: description: | Version of the component type: "string" x-nullable: false example: "19.03.12" Details: description: | Key/value pairs of strings with additional information about the component. These values are intended for informational purposes only, and their content is not defined, and not part of the API specification. These messages can be printed by the client as information to the user. type: "object" x-nullable: true Version: description: "The version of the daemon" type: "string" example: "19.03.12" ApiVersion: description: | The default (and highest) API version that is supported by the daemon type: "string" example: "1.40" MinAPIVersion: description: | The minimum API version that is supported by the daemon type: "string" example: "1.12" GitCommit: description: | The Git commit of the source code that was used to build the daemon type: "string" example: "48a66213fe" GoVersion: description: | The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" example: "go1.13.14" Os: description: | The operating system that the daemon is running on ("linux" or "windows") type: "string" example: "linux" Arch: description: | The architecture that the daemon is running on type: "string" example: "amd64" KernelVersion: description: | The kernel version (`uname -r`) that the daemon is running on. This field is omitted when empty. type: "string" example: "4.19.76-linuxkit" Experimental: description: | Indicates if the daemon is started with experimental features enabled. This field is omitted when empty / false. type: "boolean" example: true BuildTime: description: | The date and time that the daemon was compiled. type: "string" example: "2020-06-22T15:49:27.000000000+00:00" SystemInfo: type: "object" properties: ID: description: | Unique identifier of the daemon. <p><br /></p> > **Note**: The format of the ID itself is not part of the API, and > should not be considered stable. type: "string" example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" Containers: description: "Total number of containers on the host." type: "integer" example: 14 ContainersRunning: description: | Number of containers with status `"running"`. type: "integer" example: 3 ContainersPaused: description: | Number of containers with status `"paused"`. type: "integer" example: 1 ContainersStopped: description: | Number of containers with status `"stopped"`. type: "integer" example: 10 Images: description: | Total number of images on the host. Both _tagged_ and _untagged_ (dangling) images are counted. type: "integer" example: 508 Driver: description: "Name of the storage driver in use." type: "string" example: "overlay2" DriverStatus: description: | Information specific to the storage driver, provided as "label" / "value" pairs. 
This information is provided by the storage driver, and formatted in a way consistent with the output of `docker info` on the command line. <p><br /></p> > **Note**: The information returned in this field, including the > formatting of values and labels, should not be considered stable, > and may change without notice. type: "array" items: type: "array" items: type: "string" example: - ["Backing Filesystem", "extfs"] - ["Supports d_type", "true"] - ["Native Overlay Diff", "true"] DockerRootDir: description: | Root directory of persistent Docker state. Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` on Windows. type: "string" example: "/var/lib/docker" Plugins: $ref: "#/definitions/PluginsInfo" MemoryLimit: description: "Indicates if the host has memory limit support enabled." type: "boolean" example: true SwapLimit: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true KernelMemory: description: | Indicates if the host has kernel memory limit support enabled. <p><br /></p> > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated > `kmem.limit_in_bytes`. type: "boolean" example: true CpuCfsPeriod: description: | Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host. type: "boolean" example: true CpuCfsQuota: description: | Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host. type: "boolean" example: true CPUShares: description: | Indicates if CPU Shares limiting is supported by the host. type: "boolean" example: true CPUSet: description: | Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) type: "boolean" example: true PidsLimit: description: "Indicates if the host kernel has PID limit support enabled." type: "boolean" example: true OomKillDisable: description: "Indicates if OOM killer disable is supported on the host." type: "boolean" IPv4Forwarding: description: "Indicates IPv4 forwarding is enabled." type: "boolean" example: true BridgeNfIptables: description: "Indicates if `bridge-nf-call-iptables` is available on the host." type: "boolean" example: true BridgeNfIp6tables: description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." type: "boolean" example: true Debug: description: | Indicates if the daemon is running in debug-mode / with debug-level logging enabled. type: "boolean" example: true NFd: description: | The total number of file Descriptors in use by the daemon process. This information is only returned if debug-mode is enabled. type: "integer" example: 64 NGoroutines: description: | The number of goroutines that currently exist. This information is only returned if debug-mode is enabled. type: "integer" example: 174 SystemTime: description: | Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" example: "2017-08-08T20:28:29.06202363Z" LoggingDriver: description: | The logging driver to use as a default for new containers. type: "string" CgroupDriver: description: | The driver to use for managing cgroups. type: "string" enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" CgroupVersion: description: | The version of the cgroup. type: "string" enum: ["1", "2"] default: "1" example: "1" NEventsListener: description: "Number of event listeners subscribed." 
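        # Illustrative note (not part of the schema): SystemInfo describes the
        # body returned by `GET /info`. A trimmed sketch of such a response,
        # showing only a handful of the fields defined in this object (values
        # are illustrative):
        #
        #   {
        #     "Driver": "overlay2",
        #     "DriverStatus": [["Backing Filesystem", "extfs"]],
        #     "CgroupDriver": "systemd",
        #     "CgroupVersion": "2",
        #     "NCPU": 4,
        #     "OperatingSystem": "Ubuntu 20.04.2 LTS"
        #   }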
type: "integer" example: 30 KernelVersion: description: | Kernel version of the host. On Linux, this information obtained from `uname`. On Windows this information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. type: "string" example: "4.9.38-moby" OperatingSystem: description: | Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" or "Windows Server 2016 Datacenter" type: "string" example: "Alpine Linux v3.5" OSVersion: description: | Version of the host's operating system <p><br /></p> > **Note**: The information returned in this field, including its > very existence, and the formatting of values, should not be considered > stable, and may change without notice. type: "string" example: "16.04" OSType: description: | Generic type of the operating system of the host, as returned by the Go runtime (`GOOS`). Currently returned values are "linux" and "windows". A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "linux" Architecture: description: | Hardware architecture of the host, as returned by the Go runtime (`GOARCH`). A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). type: "string" example: "x86_64" NCPU: description: | The number of logical CPUs usable by the daemon. The number of available CPUs is checked by querying the operating system when the daemon starts. Changes to operating system CPU allocation after the daemon is started are not reflected. type: "integer" example: 4 MemTotal: description: | Total amount of physical memory available on the host, in bytes. type: "integer" format: "int64" example: 2095882240 IndexServerAddress: description: | Address / URL of the index server that is used for image search, and as a default for user authentication for Docker Hub and Docker Cloud. default: "https://index.docker.io/v1/" type: "string" example: "https://index.docker.io/v1/" RegistryConfig: $ref: "#/definitions/RegistryServiceConfig" GenericResources: $ref: "#/definitions/GenericResources" HttpProxy: description: | HTTP-proxy configured for the daemon. This value is obtained from the [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "http://xxxxx:[email protected]:8080" HttpsProxy: description: | HTTPS-proxy configured for the daemon. This value is obtained from the [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "https://xxxxx:[email protected]:4443" NoProxy: description: | Comma-separated list of domain extensions for which no proxy should be used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Containers do not automatically inherit this configuration. 
type: "string" example: "*.local, 169.254/16" Name: description: "Hostname of the host." type: "string" example: "node5.corp.example.com" Labels: description: | User-defined labels (key/value metadata) as set on the daemon. <p><br /></p> > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, > set through the daemon configuration, and _node_ labels, set from a > manager node in the Swarm. Node labels are not included in this > field. Node labels can be retrieved using the `/nodes/(id)` endpoint > on a manager node in the Swarm. type: "array" items: type: "string" example: ["storage=ssd", "production"] ExperimentalBuild: description: | Indicates if experimental features are enabled on the daemon. type: "boolean" example: true ServerVersion: description: | Version string of the daemon. > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) > returns the Swarm version instead of the daemon version, for example > `swarm/1.2.8`. type: "string" example: "17.06.0-ce" ClusterStore: description: | URL of the distributed storage backend. The storage backend is used for multihost networking (to store network and endpoint information) and by the node discovery mechanism. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "consul://consul.corp.example.com:8600/some/path" ClusterAdvertise: description: | The network endpoint that the Engine advertises for the purpose of node discovery. ClusterAdvertise is a `host:port` combination on which the daemon is reachable by other hosts. <p><br /></p> > **Deprecated**: This field is only propagated when using standalone Swarm > mode, and overlay networking using an external k/v store. Overlay > networks with Swarm mode enabled use the built-in raft store, and > this field will be empty. type: "string" example: "node5.corp.example.com:8000" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) runtimes configured on the daemon. Keys hold the "name" used to reference the runtime. The Docker daemon relies on an OCI compliant runtime (invoked via the `containerd` daemon) as its interface to the Linux kernel namespaces, cgroups, and SELinux. The default runtime is `runc`, and automatically configured. Additional runtimes can be configured by the user and will be listed here. type: "object" additionalProperties: $ref: "#/definitions/Runtime" default: runc: path: "runc" example: runc: path: "runc" runc-master: path: "/go/bin/runc" custom: path: "/usr/local/bin/my-oci-runtime" runtimeArgs: ["--debug", "--systemd-cgroup=false"] DefaultRuntime: description: | Name of the default OCI runtime that is used when starting containers. The default can be overridden per-container at create time. type: "string" default: "runc" example: "runc" Swarm: $ref: "#/definitions/SwarmInfo" LiveRestoreEnabled: description: | Indicates if live restore is enabled. If enabled, containers are kept running when the daemon is shutdown or upon daemon start if running containers are detected. type: "boolean" default: false example: false Isolation: description: | Represents the isolation technology to use as a default for containers. The supported values are platform-specific. 
If no isolation value is specified on daemon start, on Windows client, the default is `hyperv`, and on Windows server, the default is `process`. This option is currently not used on other platforms. default: "default" type: "string" enum: - "default" - "hyperv" - "process" InitBinary: description: | Name and, optional, path of the `docker-init` binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "docker-init" ContainerdCommit: $ref: "#/definitions/Commit" RuncCommit: $ref: "#/definitions/Commit" InitCommit: $ref: "#/definitions/Commit" SecurityOptions: description: | List of security features that are enabled on the daemon, such as apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value pairs. type: "array" items: type: "string" example: - "name=apparmor" - "name=seccomp,profile=default" - "name=selinux" - "name=userns" - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. If a commercial license has been applied to the daemon, information such as number of nodes, and expiration are included. type: "string" example: "Community Engine" DefaultAddressPools: description: | List of custom default address pools for local networks, which can be specified in the daemon.json file or dockerd option. Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 10.10.[0-255].0/24 address pools. type: "array" items: type: "object" properties: Base: description: "The network address in CIDR format" type: "string" example: "10.10.0.0/16" Size: description: "The network pool size" type: "integer" example: "24" Warnings: description: | List of warnings / informational messages about missing features, or issues related to the daemon configuration. These messages can be printed by the client as information to the user. type: "array" items: type: "string" example: - "WARNING: No memory limit support" - "WARNING: bridge-nf-call-iptables is disabled" - "WARNING: bridge-nf-call-ip6tables is disabled" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: description: | Available plugins per type. <p><br /></p> > **Note**: Only unmanaged (V1) plugins are included in this list. > V1 plugins are "lazily" loaded, and are not returned in this list > if there is no resource using the plugin. type: "object" properties: Volume: description: "Names of available volume-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["local"] Network: description: "Names of available network-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] Authorization: description: "Names of available authorization plugins." type: "array" items: type: "string" example: ["img-authz-plugin", "hbm"] Log: description: "Names of available logging-drivers, and logging-driver plugins." type: "array" items: type: "string" example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] RegistryServiceConfig: description: | RegistryServiceConfig stores daemon registry services configuration. 
type: "object" x-nullable: true properties: AllowNondistributableArtifactsCIDRs: description: | List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior, and enables the daemon to push nondistributable artifacts to all registries whose resolved IP address is within the subnet described by the CIDR syntax. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. Some images (for example, Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included. This configuration override this behavior for the specified registries. This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server. > **Warning**: Nondistributable artifacts typically have restrictions > on how and where they can be distributed and shared. Only use this > feature to push artifacts to private registries and ensure that you > are in compliance with any terms that cover redistributing > nondistributable artifacts. type: "array" items: type: "string" example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. By default, local registries (`127.0.0.0/8`) are configured as insecure. All other registries are secure. Communicating with an insecure registry is not possible if the daemon assumes that registry is secure. This configuration override this behavior, insecure communication with registries whose resolved IP address is within the subnet described by the CIDR syntax. Registries can also be marked insecure by hostname. Those registries are listed under `IndexConfigs` and have their `Secure` field set to `false`. > **Warning**: Using this option can be useful when running a local > registry, but introduces security vulnerabilities. This option > should therefore ONLY be used for testing purposes. For increased > security, users should add their CA to their system's list of trusted > CAs instead of enabling this option. 
type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] IndexConfigs: type: "object" additionalProperties: $ref: "#/definitions/IndexInfo" example: "127.0.0.1:5000": "Name": "127.0.0.1:5000" "Mirrors": [] "Secure": false "Official": false "[2001:db8:a0b:12f0::1]:80": "Name": "[2001:db8:a0b:12f0::1]:80" "Mirrors": [] "Secure": false "Official": false "docker.io": Name: "docker.io" Mirrors: ["https://hub-mirror.corp.example.com:5000/"] Secure: true Official: true "registry.internal.corp.example.com:3000": Name: "registry.internal.corp.example.com:3000" Mirrors: [] Secure: false Official: false Mirrors: description: | List of registry URLs that act as a mirror for the official (`docker.io`) registry. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://[2001:db8:a0b:12f0::1]/" IndexInfo: description: IndexInfo contains information about a registry. type: "object" x-nullable: true properties: Name: description: | Name of the registry, such as "docker.io". type: "string" example: "docker.io" Mirrors: description: | List of mirrors, expressed as URIs. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://registry-2.docker.io/" - "https://registry-3.docker.io/" Secure: description: | Indicates if the registry is part of the list of insecure registries. If `false`, the registry is insecure. Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. > **Warning**: Insecure registries can be useful when running a local > registry. However, because its use creates security vulnerabilities > it should ONLY be enabled for testing purposes. For increased > security, users should add their CA to their system's list of > trusted CAs instead of enabling this option. type: "boolean" example: true Official: description: | Indicates whether this is an official registry (i.e., Docker Hub / docker.io) type: "boolean" example: true Runtime: description: | Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) runtime. The runtime is invoked by the daemon via the `containerd` daemon. OCI runtimes act as an interface to the Linux kernel namespaces, cgroups, and SELinux. type: "object" properties: path: description: | Name and, optional, path, of the OCI executable binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "/usr/local/bin/my-oci-runtime" runtimeArgs: description: | List of command-line arguments to pass to the runtime when invoked. type: "array" x-nullable: true items: type: "string" example: ["--debug", "--systemd-cgroup=false"] Commit: description: | Commit holds the Git-commit (SHA1) that a binary was built from, as reported in the version-string of external tools, such as `containerd`, or `runC`. type: "object" properties: ID: description: "Actual commit ID of external tool." type: "string" example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" Expected: description: | Commit ID of external tool expected by dockerd as set at build time. type: "string" example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" SwarmInfo: description: | Represents generic information about swarm. type: "object" properties: NodeID: description: "Unique identifier of for this node in the swarm." 
type: "string" default: "" example: "k67qz4598weg5unwwffg6z1m1" NodeAddr: description: | IP address at which this node can be reached by other nodes in the swarm. type: "string" default: "" example: "10.0.0.46" LocalNodeState: $ref: "#/definitions/LocalNodeState" ControlAvailable: type: "boolean" default: false example: true Error: type: "string" default: "" RemoteManagers: description: | List of ID's and addresses of other managers in the swarm. type: "array" default: null x-nullable: true items: $ref: "#/definitions/PeerNode" example: - NodeID: "71izy0goik036k48jg985xnds" Addr: "10.0.0.158:2377" - NodeID: "79y6h1o4gv8n120drcprv5nmc" Addr: "10.0.0.159:2377" - NodeID: "k67qz4598weg5unwwffg6z1m1" Addr: "10.0.0.46:2377" Nodes: description: "Total number of nodes in the swarm." type: "integer" x-nullable: true example: 4 Managers: description: "Total number of managers in the swarm." type: "integer" x-nullable: true example: 3 Cluster: $ref: "#/definitions/ClusterInfo" LocalNodeState: description: "Current local status of this node." type: "string" default: "" enum: - "" - "inactive" - "pending" - "active" - "error" - "locked" example: "active" PeerNode: description: "Represents a peer-node in the swarm" properties: NodeID: description: "Unique identifier of for this node in the swarm." type: "string" Addr: description: | IP address and ports at which this node can be reached. type: "string" NetworkAttachmentConfig: description: | Specifies how a service should be attached to a particular network. type: "object" properties: Target: description: | The target network for attachment. Must be a network name or ID. type: "string" Aliases: description: | Discoverable alternate names for the service on this network. type: "array" items: type: "string" DriverOpts: description: | Driver attachment options for the network target. type: "object" additionalProperties: type: "string" EventActor: description: | Actor describes something that generates events, like a container, network, or a volume. type: "object" properties: ID: description: "The ID of the object emitting the event" type: "string" example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" Attributes: description: | Various key/value attributes of the object, depending on its type. type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-label-value" image: "alpine:latest" name: "my-container" EventMessage: description: | EventMessage represents the information an event contains. type: "object" title: "SystemEventsResponse" properties: Type: description: "The type of object emitting the event" type: "string" enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] example: "container" Action: description: "The type of event" type: "string" example: "create" Actor: $ref: "#/definitions/EventActor" scope: description: | Scope of the event. Engine events are `local` scope. Cluster (Swarm) events are `swarm` scope. type: "string" enum: ["local", "swarm"] time: description: "Timestamp of event" type: "integer" format: "int64" example: 1629574695 timeNano: description: "Timestamp of event, with nanosecond accuracy" type: "integer" format: "int64" example: 1629574695515050031 OCIDescriptor: type: "object" x-go-name: Descriptor description: | A descriptor struct containing digest, media type, and size, as defined in the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). 
properties: mediaType: description: | The media type of the object this schema refers to. type: "string" example: "application/vnd.docker.distribution.manifest.v2+json" digest: description: | The digest of the targeted content. type: "string" example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" size: description: | The size in bytes of the blob. type: "integer" format: "int64" example: 3987495 # TODO Not yet including these fields for now, as they are nil / omitted in our response. # urls: # description: | # List of URLs from which this object MAY be downloaded. # type: "array" # items: # type: "string" # format: "uri" # annotations: # description: | # Arbitrary metadata relating to the targeted content. # type: "object" # additionalProperties: # type: "string" # platform: # $ref: "#/definitions/OCIPlatform" OCIPlatform: type: "object" x-go-name: Platform description: | Describes the platform which the image in the manifest runs on, as defined in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). properties: architecture: description: | The CPU architecture, for example `amd64` or `ppc64`. type: "string" example: "arm" os: description: | The operating system, for example `linux` or `windows`. type: "string" example: "windows" os.version: description: | Optional field specifying the operating system version, for example on Windows `10.0.19041.1165`. type: "string" example: "10.0.19041.1165" os.features: description: | Optional field specifying an array of strings, each listing a required OS feature (for example on Windows `win32k`). type: "array" items: type: "string" example: - "win32k" variant: description: | Optional field specifying a variant of the CPU, for example `v7` to specify ARMv7 when architecture is `arm`. type: "string" example: "v7" DistributionInspect: type: "object" x-go-name: DistributionInspect title: "DistributionInspectResponse" required: [Descriptor, Platforms] description: | Describes the result obtained from contacting the registry to retrieve image metadata. properties: Descriptor: $ref: "#/definitions/OCIDescriptor" Platforms: type: "array" description: | An array containing all platforms supported by the image. items: $ref: "#/definitions/OCIPlatform" paths: /containers/json: get: summary: "List containers" description: | Returns a list of containers. For details on the format, see the [inspect endpoint](#operation/ContainerInspect). Note that it uses a different, smaller representation of a container than inspecting a single container. For example, the list of linked containers is not propagated . operationId: "ContainerList" produces: - "application/json" parameters: - name: "all" in: "query" description: | Return all containers. By default, only running containers are shown. type: "boolean" default: false - name: "limit" in: "query" description: | Return this number of most recently created containers, including non-running ones. type: "integer" - name: "size" in: "query" description: | Return the size of container as fields `SizeRw` and `SizeRootFs`. type: "boolean" default: false - name: "filters" in: "query" description: | Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
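          As a rough illustration, the encoded `filters` value can be produced
          with the Go standard library alone; this is only a sketch (not the
          only way to do it), and the label value shown is made up. The full
          list of supported filters follows.

          ```go
          // Minimal sketch: build the ?filters= value for GET /containers/json.
          // Needs "encoding/json" and "net/url" from the standard library.
          filters := map[string][]string{
              "status": {"paused"},
              "label":  {"com.example.env=staging"}, // hypothetical label
          }
          raw, err := json.Marshal(filters)
          if err != nil {
              panic(err)
          }
          query := "filters=" + url.QueryEscape(string(raw))
          _ = query // append to the request URL, e.g. /containers/json?all=true&filters=...
          ```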
Available filters: - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) - `before`=(`<container id>` or `<container name>`) - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `exited=<int>` containers with exit code of `<int>` - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - `id=<ID>` a container's ID - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - `is-task=`(`true`|`false`) - `label=key` or `label="key=value"` of a container label - `name=<name>` a container's name - `network`=(`<network id>` or `<network name>`) - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `since`=(`<container id>` or `<container name>`) - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - `volume`=(`<volume name>` or `<mount point destination>`) type: "string" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/ContainerSummary" examples: application/json: - Id: "8dfafdbc3a40" Names: - "/boring_feynman" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 1" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: - PrivatePort: 2222 PublicPort: 3333 Type: "tcp" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:02" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" - Id: "9cd87474be90" Names: - "/coolName" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 222222" Created: 1367854155 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" Gateway: "172.17.0.1" IPAddress: "172.17.0.8" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:08" Mounts: [] - Id: "3176a2479c92" Names: - "/sleepy_dog" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 3333333333333333" Created: 1367854154 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" Gateway: "172.17.0.1" IPAddress: "172.17.0.6" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:06" Mounts: [] - Id: "4cb07b47f9fb" Names: - "/running_cat" Image: "ubuntu:latest" ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" Command: "echo 444444444444444444444444444444444" Created: 1367854152 State: "Exited" Status: "Exit 0" Ports: [] Labels: {} SizeRw: 12288 SizeRootFs: 0 HostConfig: NetworkMode: "default" 
NetworkSettings: Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" Gateway: "172.17.0.1" IPAddress: "172.17.0.5" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:11:00:05" Mounts: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/create: post: summary: "Create a container" operationId: "ContainerCreate" consumes: - "application/json" - "application/octet-stream" produces: - "application/json" parameters: - name: "name" in: "query" description: | Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. type: "string" pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" schema: allOf: - $ref: "#/definitions/ContainerConfig" - type: "object" properties: HostConfig: $ref: "#/definitions/HostConfig" NetworkingConfig: $ref: "#/definitions/NetworkingConfig" example: Hostname: "" Domainname: "" User: "" AttachStdin: false AttachStdout: true AttachStderr: true Tty: false OpenStdin: false StdinOnce: false Env: - "FOO=bar" - "BAZ=quux" Cmd: - "date" Entrypoint: "" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" Volumes: /volumes/data: {} WorkingDir: "" NetworkDisabled: false MacAddress: "12:34:56:78:9a:bc" ExposedPorts: 22/tcp: {} StopSignal: "SIGTERM" StopTimeout: 10 HostConfig: Binds: - "/tmp:/tmp" Links: - "redis3:redis" Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpuQuota: 50000 CpusetCpus: "0,1" CpusetMems: "0,1" MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 300 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceWriteIOps: - {} DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" MemorySwappiness: 60 OomKillDisable: false OomScoreAdj: 500 PidMode: "" PidsLimit: 0 PortBindings: 22/tcp: - HostPort: "11022" PublishAllPorts: false Privileged: false ReadonlyRootfs: false Dns: - "8.8.8.8" DnsOptions: - "" DnsSearch: - "" VolumesFrom: - "parent" - "other:ro" CapAdd: - "NET_ADMIN" CapDrop: - "MKNOD" GroupAdd: - "newgroup" RestartPolicy: Name: "" MaximumRetryCount: 0 AutoRemove: true NetworkMode: "bridge" Devices: [] Ulimits: - {} LogConfig: Type: "json-file" Config: {} SecurityOpt: [] StorageOpt: {} CgroupParent: "" VolumeDriver: "" ShmSize: 67108864 NetworkingConfig: EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" required: true responses: 201: description: "Container created successfully" schema: type: "object" title: "ContainerCreateResponse" description: "OK response to ContainerCreate operation" required: [Id, Warnings] properties: Id: description: "The ID of the created container" type: "string" x-nullable: false Warnings: description: "Warnings encountered when creating the container" type: "array" x-nullable: false items: type: "string" 
examples: application/json: Id: "e90e34656806" Warnings: [] 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/{id}/json: get: summary: "Inspect a container" description: "Return low-level information about a container." operationId: "ContainerInspect" produces: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "ContainerInspectResponse" properties: Id: description: "The ID of the container" type: "string" Created: description: "The time the container was created" type: "string" Path: description: "The path to the command being run" type: "string" Args: description: "The arguments to the command being run" type: "array" items: type: "string" State: x-nullable: true $ref: "#/definitions/ContainerState" Image: description: "The container's image ID" type: "string" ResolvConfPath: type: "string" HostnamePath: type: "string" HostsPath: type: "string" LogPath: type: "string" Name: type: "string" RestartCount: type: "integer" Driver: type: "string" Platform: type: "string" MountLabel: type: "string" ProcessLabel: type: "string" AppArmorProfile: type: "string" ExecIDs: description: "IDs of exec instances that are running in the container." type: "array" items: type: "string" x-nullable: true HostConfig: $ref: "#/definitions/HostConfig" GraphDriver: $ref: "#/definitions/GraphDriverData" SizeRw: description: | The size of files that have been created or changed by this container. type: "integer" format: "int64" SizeRootFs: description: "The total size of all the files in this container." 
type: "integer" format: "int64" Mounts: type: "array" items: $ref: "#/definitions/MountPoint" Config: $ref: "#/definitions/ContainerConfig" NetworkSettings: $ref: "#/definitions/NetworkSettings" examples: application/json: AppArmorProfile: "" Args: - "-c" - "exit 9" Config: AttachStderr: true AttachStdin: false AttachStdout: true Cmd: - "/bin/sh" - "-c" - "exit 9" Domainname: "" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Healthcheck: Test: ["CMD-SHELL", "exit 0"] Hostname: "ba033ac44011" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" MacAddress: "" NetworkDisabled: false OpenStdin: false StdinOnce: false Tty: false User: "" Volumes: /volumes/data: {} WorkingDir: "" StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" Driver: "devicemapper" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 0 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteIOps: - {} ContainerIDFile: "" CpusetCpus: "" CpusetMems: "" CpuPercent: 80 CpuShares: 0 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 Devices: [] DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" IpcMode: "" LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" PidMode: "" PortBindings: {} Privileged: false ReadonlyRootfs: false PublishAllPorts: false RestartPolicy: MaximumRetryCount: 2 Name: "on-failure" LogConfig: Type: "json-file" Sysctls: net.ipv4.ip_forward: "1" Ulimits: - {} VolumeDriver: "" ShmSize: 67108864 HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" MountLabel: "" Name: "/boring_euclid" NetworkSettings: Bridge: "" SandboxID: "" HairpinMode: false LinkLocalIPv6Address: "" LinkLocalIPv6PrefixLen: 0 SandboxKey: "" EndpointID: "" Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 IPAddress: "" IPPrefixLen: 0 IPv6Gateway: "" MacAddress: "" Networks: bridge: NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" Gateway: "172.17.0.1" IPAddress: "172.17.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Path: "/bin/sh" ProcessLabel: "" ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" RestartCount: 1 State: Error: "" ExitCode: 9 FinishedAt: "2015-01-06T15:47:32.080254511Z" Health: Status: "healthy" FailingStreak: 0 Log: - Start: "2019-12-22T10:59:05.6385933Z" End: "2019-12-22T10:59:05.8078452Z" ExitCode: 0 Output: "" OOMKilled: 
false Dead: false Paused: false Pid: 0 Restarting: false Running: true StartedAt: "2015-01-06T15:47:32.072697474Z" Status: "running" Mounts: - Name: "fac362...80535" Source: "/data" Destination: "/data" Driver: "local" Mode: "ro,Z" RW: false Propagation: "" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "size" in: "query" type: "boolean" default: false description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" tags: ["Container"] /containers/{id}/top: get: summary: "List processes running inside a container" description: | On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows. operationId: "ContainerTop" responses: 200: description: "no error" schema: type: "object" title: "ContainerTopResponse" description: "OK response to ContainerTop operation" properties: Titles: description: "The ps column titles" type: "array" items: type: "string" Processes: description: | Each process running in the container, where each is process is an array of values corresponding to the titles. type: "array" items: type: "array" items: type: "string" examples: application/json: Titles: - "UID" - "PID" - "PPID" - "C" - "STIME" - "TTY" - "TIME" - "CMD" Processes: - - "root" - "13642" - "882" - "0" - "17:03" - "pts/0" - "00:00:00" - "/bin/bash" - - "root" - "13735" - "13642" - "0" - "17:06" - "pts/0" - "00:00:00" - "sleep 10" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "ps_args" in: "query" description: "The arguments to pass to `ps`. For example, `aux`" type: "string" default: "-ef" tags: ["Container"] /containers/{id}/logs: get: summary: "Get container logs" description: | Get `stdout` and `stderr` logs from a container. Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. operationId: "ContainerLogs" responses: 200: description: | logs returned as a stream in response body. For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not set Content-Type. schema: type: "string" format: "binary" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "until" in: "query" description: "Only return logs before this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Container"] /containers/{id}/changes: get: summary: "Get changes on a container’s filesystem" description: | Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: - `0`: Modified - `1`: Added - `2`: Deleted operationId: "ContainerChanges" produces: ["application/json"] responses: 200: description: "The list of changes" schema: type: "array" items: type: "object" x-go-name: "ContainerChangeResponseItem" title: "ContainerChangeResponseItem" description: "change item in response to ContainerChanges operation" required: [Path, Kind] properties: Path: description: "Path to file that has changed" type: "string" x-nullable: false Kind: description: "Kind of change" type: "integer" format: "uint8" enum: [0, 1, 2] x-nullable: false examples: application/json: - Path: "/dev" Kind: 0 - Path: "/dev/kmsg" Kind: 1 - Path: "/test" Kind: 1 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/export: get: summary: "Export a container" description: "Export the contents of a container as a tarball." operationId: "ContainerExport" produces: - "application/octet-stream" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/stats: get: summary: "Get container stats based on resource usage" description: | This endpoint returns a live stream of a container’s resource usage statistics. The `precpu_stats` is the CPU statistic of the *previous* read, and is used to calculate the CPU usage percentage. It is not an exact copy of the `cpu_stats` field. If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. On a cgroup v2 host, the following fields are not set * `blkio_stats`: all fields other than `io_service_bytes_recursive` * `cpu_stats`: `cpu_usage.percpu_usage` * `memory_stats`: `max_usage` and `failcnt` Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: * used_memory = `memory_stats.usage - memory_stats.stats.cache` * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] responses: 200: description: "no error" schema: type: "object" examples: application/json: read: "2015-01-08T22:57:31.547920715Z" pids_stats: current: 3 networks: eth0: rx_bytes: 5338 rx_dropped: 0 rx_errors: 0 rx_packets: 36 tx_bytes: 648 tx_dropped: 0 tx_errors: 0 tx_packets: 8 eth5: rx_bytes: 4641 rx_dropped: 0 rx_errors: 0 rx_packets: 26 tx_bytes: 690 tx_dropped: 0 tx_errors: 0 tx_packets: 9 memory_stats: stats: total_pgmajfault: 0 cache: 0 mapped_file: 0 total_inactive_file: 0 pgpgout: 414 rss: 6537216 total_mapped_file: 0 writeback: 0 unevictable: 0 pgpgin: 477 total_unevictable: 0 pgmajfault: 0 total_rss: 6537216 total_rss_huge: 6291456 total_writeback: 0 total_inactive_anon: 0 rss_huge: 6291456 hierarchical_memory_limit: 67108864 total_pgfault: 964 total_active_file: 0 active_anon: 6537216 total_active_anon: 6537216 total_pgpgout: 414 total_cache: 0 inactive_anon: 0 active_file: 0 pgfault: 964 inactive_file: 0 total_pgpgin: 477 max_usage: 6651904 usage: 6537216 failcnt: 0 limit: 67108864 blkio_stats: {} cpu_stats: cpu_usage: percpu_usage: - 8646879 - 24472255 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100215355 usage_in_kernelmode: 30000000 system_cpu_usage: 739306590000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 precpu_stats: cpu_usage: percpu_usage: - 8646879 - 24350896 - 36438778 - 30657443 usage_in_usermode: 50000000 total_usage: 100093996 usage_in_kernelmode: 30000000 system_cpu_usage: 9492140000000 online_cpus: 4 throttling_data: periods: 0 throttled_periods: 0 throttled_time: 0 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "stream" in: "query" description: | Stream the output. If false, the stats will be output once and then it will disconnect. type: "boolean" default: true - name: "one-shot" in: "query" description: | Only get a single stat instead of waiting for 2 cycles. Must be used with `stream=false`. type: "boolean" default: false tags: ["Container"] /containers/{id}/resize: post: summary: "Resize a container TTY" description: "Resize the TTY for a container." 
operationId: "ContainerResize" consumes: - "application/octet-stream" produces: - "text/plain" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "cannot resize container" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] /containers/{id}/start: post: summary: "Start a container" operationId: "ContainerStart" responses: 204: description: "no error" 304: description: "container already started" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" tags: ["Container"] /containers/{id}/stop: post: summary: "Stop a container" operationId: "ContainerStop" responses: 204: description: "no error" 304: description: "container already stopped" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/restart: post: summary: "Restart a container" operationId: "ContainerRestart" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/kill: post: summary: "Kill a container" description: | Send a POSIX signal to a container, defaulting to killing to the container. 
operationId: "ContainerKill" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is not running" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "signal" in: "query" description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" type: "string" default: "SIGKILL" tags: ["Container"] /containers/{id}/update: post: summary: "Update a container" description: | Change various configuration options of a container without having to recreate it. operationId: "ContainerUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "The container has been updated." schema: type: "object" title: "ContainerUpdateResponse" description: "OK response to ContainerUpdate operation" properties: Warnings: type: "array" items: type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "update" in: "body" required: true schema: allOf: - $ref: "#/definitions/Resources" - type: "object" properties: RestartPolicy: $ref: "#/definitions/RestartPolicy" example: BlkioWeight: 300 CpuShares: 512 CpuPeriod: 100000 CpuQuota: 50000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpusetCpus: "0,1" CpusetMems: "0" Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" tags: ["Container"] /containers/{id}/rename: post: summary: "Rename a container" operationId: "ContainerRename" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "name already in use" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "name" in: "query" required: true description: "New name for the container" type: "string" tags: ["Container"] /containers/{id}/pause: post: summary: "Pause a container" description: | Use the freezer cgroup to suspend all processes in a container. Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
operationId: "ContainerPause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/unpause: post: summary: "Unpause a container" description: "Resume a container which has been paused." operationId: "ContainerUnpause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/attach: post: summary: "Attach to a container" description: | Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. ### Hijacking This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. This is the response from the daemon for an attach request: ``` HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream [STREAM] ``` After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. For example, the client sends this request to upgrade the connection: ``` POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 Upgrade: tcp Connection: Upgrade ``` The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp [STREAM] ``` ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: ```go header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} ``` `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. The simplest way to implement this protocol is the following: 1. Read 8 bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. 
### Stream format when using a TTY When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" - name: "logs" in: "query" description: | Replay previous logs from the container. This is useful for attaching to a container that has started and you want to output everything since the container started. If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. type: "boolean" default: false - name: "stream" in: "query" description: | Stream attached streams from the time the request was made onwards. type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/attach/ws: get: summary: "Attach to a container via a websocket" operationId: "ContainerAttachWebsocket" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`. type: "string" - name: "logs" in: "query" description: "Return logs" type: "boolean" default: false - name: "stream" in: "query" description: "Return stream" type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/wait: post: summary: "Wait for a container" description: "Block until a container stops, then returns the exit code." 
operationId: "ContainerWait" produces: ["application/json"] responses: 200: description: "The container has exit." schema: type: "object" title: "ContainerWaitResponse" description: "OK response to ContainerWait operation" required: [StatusCode] properties: StatusCode: description: "Exit code of the container" type: "integer" x-nullable: false Error: description: "container waiting error, if any" type: "object" properties: Message: description: "Details of an error" type: "string" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "condition" in: "query" description: | Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'. type: "string" default: "not-running" tags: ["Container"] /containers/{id}: delete: summary: "Remove a container" operationId: "ContainerDelete" responses: 204: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: | You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "v" in: "query" description: "Remove anonymous volumes associated with the container." type: "boolean" default: false - name: "force" in: "query" description: "If the container is running, kill it before removing it." type: "boolean" default: false - name: "link" in: "query" description: "Remove the specified link associated with the container." type: "boolean" default: false tags: ["Container"] /containers/{id}/archive: head: summary: "Get information about files in a container" description: | A response header `X-Docker-Container-Path-Stat` is returned, containing a base64 - encoded JSON object with some filesystem header information about the path. operationId: "ContainerArchiveInfo" responses: 200: description: "no error" headers: X-Docker-Container-Path-Stat: type: "string" description: | A base64 - encoded JSON object with some filesystem header information about the path 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." 
type: "string" tags: ["Container"] get: summary: "Get an archive of a filesystem resource in a container" description: "Get a tar archive of a resource in the filesystem of container id." operationId: "ContainerArchive" produces: ["application/x-tar"] responses: 200: description: "no error" 400: description: "Bad parameter" schema: allOf: - $ref: "#/definitions/ErrorResponse" - type: "object" properties: message: description: | The error message. Either "must specify path parameter" (path cannot be empty) or "not a directory" (path was asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." type: "string" tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" description: "Upload a tar archive to be extracted to a path in the filesystem of container id." operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: 200: description: "The content was extracted successfully" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such container or path does not exist inside the container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Path to a directory in the container to extract the archive’s contents into. " type: "string" - name: "noOverwriteDirNonDir" in: "query" description: | If `1`, `true`, or `True` then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. type: "string" - name: "copyUIDGID" in: "query" description: | If `1`, `true`, then it will copy UID/GID maps to the dest file or dir type: "string" - name: "inputStream" in: "body" required: true description: | The input stream must be a tar archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, or `xz`. schema: type: "string" format: "binary" tags: ["Container"] /containers/prune: post: summary: "Delete stopped containers" produces: - "application/json" operationId: "ContainerPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ContainerPruneResponse" properties: ContainersDeleted: description: "Container IDs that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /images/json: get: summary: "List Images" description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." operationId: "ImageList" produces: - "application/json" responses: 200: description: "Summary image data for the images matching the query" schema: type: "array" items: $ref: "#/definitions/ImageSummary" examples: application/json: - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ParentId: "" RepoTags: - "ubuntu:12.04" - "ubuntu:precise" RepoDigests: - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" Created: 1474925151 Size: 103579269 VirtualSize: 103579269 SharedSize: 0 Labels: {} Containers: 2 - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" ParentId: "" RepoTags: - "ubuntu:12.10" - "ubuntu:quantal" RepoDigests: - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" Created: 1403128455 Size: 172064416 VirtualSize: 172064416 SharedSize: 0 Labels: {} Containers: 5 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "all" in: "query" description: "Show all images. Only images from a final layer (no children) are shown by default." type: "boolean" default: false - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - `dangling=true` - `label=key` or `label="key=value"` of an image label - `reference`=(`<image-name>[:<tag>]`) - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) type: "string" - name: "shared-size" in: "query" description: "Compute and show shared size as a `SharedSize` field on each image." type: "boolean" default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false tags: ["Image"] /build: post: summary: "Build an image" description: | Build an image from a tar archive with a `Dockerfile` in it. The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. The build is canceled if the client drops the connection by quitting or being killed. 
operationId: "ImageBuild" consumes: - "application/octet-stream" produces: - "application/json" parameters: - name: "inputStream" in: "body" description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." schema: type: "string" format: "binary" - name: "dockerfile" in: "query" description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." type: "string" default: "Dockerfile" - name: "t" in: "query" description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." type: "string" - name: "extrahosts" in: "query" description: "Extra hosts to add to /etc/hosts" type: "string" - name: "remote" in: "query" description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." type: "string" - name: "q" in: "query" description: "Suppress verbose build output." type: "boolean" default: false - name: "nocache" in: "query" description: "Do not use the cache when building the image." type: "boolean" default: false - name: "cachefrom" in: "query" description: "JSON array of images used for build cache resolution." type: "string" - name: "pull" in: "query" description: "Attempt to pull the image even if an older image exists locally." type: "string" - name: "rm" in: "query" description: "Remove intermediate containers after a successful build." type: "boolean" default: true - name: "forcerm" in: "query" description: "Always remove intermediate containers, even upon failure." type: "boolean" default: false - name: "memory" in: "query" description: "Set memory limit for build." type: "integer" - name: "memswap" in: "query" description: "Total memory (memory + swap). Set as `-1` to disable swap." type: "integer" - name: "cpushares" in: "query" description: "CPU shares (relative weight)." type: "integer" - name: "cpusetcpus" in: "query" description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." type: "string" - name: "cpuperiod" in: "query" description: "The length of a CPU period in microseconds." type: "integer" - name: "cpuquota" in: "query" description: "Microseconds of CPU time that the container can get in a CPU period." type: "integer" - name: "buildargs" in: "query" description: > JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) type: "string" - name: "shmsize" in: "query" description: "Size of `/dev/shm` in bytes. The size must be greater than 0. 
If omitted the system uses 64MB." type: "integer" - name: "squash" in: "query" description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" type: "boolean" - name: "labels" in: "query" description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." type: "string" - name: "networkmode" in: "query" description: | Sets the networking mode for the run commands during build. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name or ID to which this container should connect to. type: "string" - name: "Content-type" in: "header" type: "string" enum: - "application/x-tar" default: "application/x-tar" - name: "X-Registry-Config" in: "header" description: | This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: ``` { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } ``` Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" - name: "target" in: "query" description: "Target build stage" type: "string" default: "" - name: "outputs" in: "query" description: "BuildKit output configuration" type: "string" default: "" responses: 200: description: "no error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /build/prune: post: summary: "Delete builder cache" produces: - "application/json" operationId: "BuildPrune" parameters: - name: "keep-storage" in: "query" description: "Amount of disk space in bytes to keep for cache" type: "integer" format: "int64" - name: "all" in: "query" type: "boolean" description: "Remove all types of build cache" - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - `id=<id>` - `parent=<id>` - `type=<string>` - `description=<string>` - `inuse` - `shared` - `private` responses: 200: description: "No error" schema: type: "object" title: "BuildPruneResponse" properties: CachesDeleted: type: "array" items: description: "ID of build cache object" type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /images/create: post: summary: "Create an image" description: "Create an image by either pulling it from a registry or importing it." 
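The `/build` endpoint above takes a tar archive as its build context. The sketch below, assuming the Go client of roughly this spec's vintage, builds from an in-memory tar that contains only a `Dockerfile`; the `example:latest` tag is a placeholder and error handling on the tar writes is elided for brevity.

```go
package main

import (
	"archive/tar"
	"bytes"
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// The build context is a tar archive; here it holds a single Dockerfile.
	dockerfile := []byte("FROM busybox\nCMD [\"echo\", \"hello\"]\n")
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0o600, Size: int64(len(dockerfile))})
	tw.Write(dockerfile)
	tw.Close()

	// POST /build: query parameters such as `t`, `dockerfile` and `rm` map onto ImageBuildOptions fields.
	resp, err := cli.ImageBuild(ctx, &buf, types.ImageBuildOptions{
		Tags:       []string{"example:latest"}, // placeholder tag
		Dockerfile: "Dockerfile",
		Remove:     true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // stream the JSON build progress
}
```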
operationId: "ImageCreate" consumes: - "text/plain" - "application/octet-stream" produces: - "application/json" responses: 200: description: "no error" 404: description: "repository does not exist or no read access" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "fromImage" in: "query" description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." type: "string" - name: "fromSrc" in: "query" description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." type: "string" - name: "repo" in: "query" description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." type: "string" - name: "tag" in: "query" description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." type: "string" - name: "message" in: "query" description: "Set commit message for imported image." type: "string" - name: "inputImage" in: "body" description: "Image content if the value `-` has been specified in fromSrc query parameter" schema: type: "string" required: false - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "changes" in: "query" description: | Apply `Dockerfile` instructions to the image that is created, for example: `changes=ENV DEBUG=true`. Note that `ENV DEBUG=true` should be URI component encoded. Supported `Dockerfile` instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` type: "array" items: type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" tags: ["Image"] /images/{name}/json: get: summary: "Inspect an image" description: "Return low-level information about an image." 
operationId: "ImageInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Image" examples: application/json: Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" Comment: "" Os: "linux" Architecture: "amd64" Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" ContainerConfig: Tty: false Hostname: "e611e15f9c9d" Domainname: "" AttachStdout: false PublishService: "" AttachStdin: false OpenStdin: false StdinOnce: false NetworkDisabled: false OnBuild: [] Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" User: "" WorkingDir: "" MacAddress: "" AttachStderr: false Labels: com.example.license: "GPL" com.example.version: "1.0" com.example.vendor: "Acme" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: - "/bin/sh" - "-c" - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" DockerVersion: "1.9.0-dev" VirtualSize: 188359297 Size: 0 Author: "" Created: "2015-09-10T08:30:53.26995814Z" GraphDriver: Name: "aufs" Data: {} RepoDigests: - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" RepoTags: - "example:1.0" - "example:latest" - "example:stable" Config: Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" NetworkDisabled: false OnBuild: [] StdinOnce: false PublishService: "" AttachStdin: false OpenStdin: false Domainname: "" AttachStdout: false Tty: false Hostname: "e611e15f9c9d" Cmd: - "/bin/bash" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Labels: com.example.vendor: "Acme" com.example.version: "1.0" com.example.license: "GPL" MacAddress: "" AttachStderr: false WorkingDir: "" User: "" RootFS: Type: "layers" Layers: - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Image"] /images/{name}/history: get: summary: "Get the history of an image" description: "Return parent layers of an image." 
operationId: "ImageHistory" produces: ["application/json"] responses: 200: description: "List of image layers" schema: type: "array" items: type: "object" x-go-name: HistoryResponseItem title: "HistoryResponseItem" description: "individual image layer information in response to ImageHistory operation" required: [Id, Created, CreatedBy, Tags, Size, Comment] properties: Id: type: "string" x-nullable: false Created: type: "integer" format: "int64" x-nullable: false CreatedBy: type: "string" x-nullable: false Tags: type: "array" items: type: "string" Size: type: "integer" format: "int64" x-nullable: false Comment: type: "string" x-nullable: false examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" Created: 1398108230 CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" Tags: - "ubuntu:lucid" - "ubuntu:10.04" Size: 182964289 Comment: "" - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" Created: 1398108222 CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <[email protected]> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" Tags: [] Size: 0 Comment: "" - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" Created: 1371157430 CreatedBy: "" Tags: - "scratch12:latest" - "scratch:latest" Size: 0 Comment: "Imported from -" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/{name}/push: post: summary: "Push an image" description: | Push an image to a registry. If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. The push is cancelled if the HTTP connection is closed. operationId: "ImagePush" consumes: - "application/octet-stream" responses: 200: description: "No error" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID." type: "string" required: true - name: "tag" in: "query" description: "The tag to associate with the image on the registry." type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" required: true tags: ["Image"] /images/{name}/tag: post: summary: "Tag an image" description: "Tag an image so that it becomes part of a repository." operationId: "ImageTag" responses: 201: description: "No error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID to tag." type: "string" required: true - name: "repo" in: "query" description: "The repository to tag in. For example, `someuser/someimage`." type: "string" - name: "tag" in: "query" description: "The name of the new tag." 
type: "string" tags: ["Image"] /images/{name}: delete: summary: "Remove an image" description: | Remove an image, along with any untagged parent images that were referenced by that image. Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. operationId: "ImageDelete" produces: ["application/json"] responses: 200: description: "The image was deleted successfully" schema: type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" examples: application/json: - Untagged: "3e2f21a89f" - Deleted: "3e2f21a89f" - Deleted: "53b4f83ac9" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true - name: "force" in: "query" description: "Remove the image even if it is being used by stopped containers or has other tags" type: "boolean" default: false - name: "noprune" in: "query" description: "Do not delete untagged parent images" type: "boolean" default: false tags: ["Image"] /images/search: get: summary: "Search images" description: "Search for an image on Docker Hub." operationId: "ImageSearch" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: type: "object" title: "ImageSearchResponseItem" properties: description: type: "string" is_official: type: "boolean" is_automated: type: "boolean" name: type: "string" star_count: type: "integer" examples: application/json: - description: "" is_official: false is_automated: false name: "wma55/u1210sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "jdswinbank/sshd" star_count: 0 - description: "" is_official: false is_automated: false name: "vgauthier/sshd" star_count: 0 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "term" in: "query" description: "Term to search" type: "string" required: true - name: "limit" in: "query" description: "Maximum number of results to return" type: "integer" - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `is-automated=(true|false)` - `is-official=(true|false)` - `stars=<number>` Matches images that has at least 'number' stars. type: "string" tags: ["Image"] /images/prune: post: summary: "Delete unused images" produces: - "application/json" operationId: "ImagePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `dangling=<boolean>` When set to `true` (or `1`), prune only unused *and* untagged images. When set to `false` (or `0`), all unused images are pruned. - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ImagePruneResponse" properties: ImagesDeleted: description: "Images that were deleted" type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /auth: post: summary: "Check auth configuration" description: | Validate credentials for a registry and, if available, get an identity token for accessing the registry without password. operationId: "SystemAuth" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "An identity token was generated successfully." schema: type: "object" title: "SystemAuthResponse" required: [Status] properties: Status: description: "The status of the authentication" type: "string" x-nullable: false IdentityToken: description: "An opaque token used to authenticate a user after a successful login" type: "string" x-nullable: false examples: application/json: Status: "Login Succeeded" IdentityToken: "9cbaf023786cd7..." 204: description: "No error" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "authConfig" in: "body" description: "Authentication to check" schema: $ref: "#/definitions/AuthConfig" tags: ["System"] /info: get: summary: "Get system information" operationId: "SystemInfo" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/SystemInfo" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /version: get: summary: "Get version" description: "Returns the version of Docker that is running and various information about the system that Docker is running on." operationId: "SystemVersion" produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/SystemVersion" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /_ping: get: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." operationId: "SystemPing" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "OK" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" headers: Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" tags: ["System"] head: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." 
operationId: "SystemPingHead" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "(empty)" headers: API-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /commit: post: summary: "Create a new image from a container" operationId: "ImageCommit" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "containerConfig" in: "body" description: "The container configuration" schema: $ref: "#/definitions/ContainerConfig" - name: "container" in: "query" description: "The ID or name of the container to commit" type: "string" - name: "repo" in: "query" description: "Repository name for the created image" type: "string" - name: "tag" in: "query" description: "Tag name for the create image" type: "string" - name: "comment" in: "query" description: "Commit message" type: "string" - name: "author" in: "query" description: "Author of the image (e.g., `John Hannibal Smith <[email protected]>`)" type: "string" - name: "pause" in: "query" description: "Whether to pause the container before committing" type: "boolean" default: true - name: "changes" in: "query" description: "`Dockerfile` instructions to apply while committing" type: "string" tags: ["Image"] /events: get: summary: "Monitor events" description: | Stream real-time events from the server. Various objects within Docker report events when something happens to them. Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` The Docker daemon reports these events: `reload` Services report these events: `create`, `update`, and `remove` Nodes report these events: `create`, `update`, and `remove` Secrets report these events: `create`, `update`, and `remove` Configs report these events: `create`, `update`, and `remove` The Builder reports `prune` events operationId: "SystemEvents" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/EventMessage" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "since" in: "query" description: "Show events created since this timestamp then stream new events." 
type: "string" - name: "until" in: "query" description: "Show events created until this timestamp then stop streaming." type: "string" - name: "filters" in: "query" description: | A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: - `config=<string>` config name or ID - `container=<string>` container name or ID - `daemon=<string>` daemon name or ID - `event=<string>` event type - `image=<string>` image name or ID - `label=<string>` image or container label - `network=<string>` network name or ID - `node=<string>` node ID - `plugin`=<string> plugin name or ID - `scope`=<string> local or swarm - `secret=<string>` secret name or ID - `service=<string>` service name or ID - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - `volume=<string>` volume name type: "string" tags: ["System"] /system/df: get: summary: "Get data usage information" operationId: "SystemDataUsage" responses: 200: description: "no error" schema: type: "object" title: "SystemDataUsageResponse" properties: LayersSize: type: "integer" format: "int64" Images: type: "array" items: $ref: "#/definitions/ImageSummary" Containers: type: "array" items: $ref: "#/definitions/ContainerSummary" Volumes: type: "array" items: $ref: "#/definitions/Volume" BuildCache: type: "array" items: $ref: "#/definitions/BuildCache" example: LayersSize: 1092588 Images: - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" ParentId: "" RepoTags: - "busybox:latest" RepoDigests: - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" Created: 1466724217 Size: 1092588 SharedSize: 0 VirtualSize: 1092588 Labels: {} Containers: 1 Containers: - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" Names: - "/top" Image: "busybox" ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" Command: "top" Created: 1472592424 Ports: [] SizeRootFs: 1092588 Labels: {} State: "exited" Status: "Exited (0) 56 minutes ago" HostConfig: NetworkMode: "default" NetworkSettings: Networks: bridge: IPAMConfig: null Links: null Aliases: null NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" Gateway: "172.18.0.1" IPAddress: "172.18.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Mounts: [] Volumes: - Name: "my-volume" Driver: "local" Mountpoint: "/var/lib/docker/volumes/my-volume/_data" Labels: null Scope: "local" Options: null UsageData: Size: 10920104 RefCount: 2 BuildCache: - ID: "hw53o5aio51xtltp5xjp8v7fx" Parent: "" Type: "regular" Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" InUse: false Shared: true Size: 0 CreatedAt: "2021-06-28T13:31:01.474619385Z" LastUsedAt: "2021-07-07T22:02:32.738075951Z" UsageCount: 26 - ID: "ndlpt0hhvkqcdfkputsk4cq9c" Parent: "hw53o5aio51xtltp5xjp8v7fx" Type: "regular" Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: false Shared: true Size: 51 CreatedAt: "2021-06-28T13:31:03.002625487Z" LastUsedAt: "2021-07-07T22:02:32.773909517Z" UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "type" in: "query" 
description: | Object types, for which to compute and return data. type: "array" collectionFormat: multi items: type: "string" enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: summary: "Export an image" description: | Get a tarball containing all images and metadata for a repository. If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ```json { "hello-world": { "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" } } ``` operationId: "ImageGet" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true tags: ["Image"] /images/get: get: summary: "Export several images" description: | Get a tarball containing all images and metadata for several image repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageGetAll" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "names" in: "query" description: "Image names to filter by" type: "array" items: type: "string" tags: ["Image"] /images/load: post: summary: "Import images" description: | Load a set of images and tags into a repository. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageLoad" consumes: - "application/x-tar" produces: - "application/json" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "imagesTarball" in: "body" description: "Tar archive containing images" schema: type: "string" format: "binary" - name: "quiet" in: "query" description: "Suppress progress details during load." type: "boolean" default: false tags: ["Image"] /containers/{id}/exec: post: summary: "Create an exec instance" description: "Run a command inside a running container." 
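A small round-trip sketch for the export/import endpoints (`/images/get` and `/images/load`) using the Go client; the image name and the `busybox.tar` path are placeholders, and some error checks are abbreviated.

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// GET /images/get: export one or more images as a tarball.
	rc, err := cli.ImageSave(ctx, []string{"busybox:latest"})
	if err != nil {
		panic(err)
	}
	out, _ := os.Create("busybox.tar")
	io.Copy(out, rc)
	rc.Close()
	out.Close()

	// POST /images/load: import the same tarball back (quiet mode).
	in, _ := os.Open("busybox.tar")
	defer in.Close()
	resp, err := cli.ImageLoad(ctx, in, true)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body)
}
```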
operationId: "ContainerExec" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is paused" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execConfig" in: "body" description: "Exec configuration" schema: type: "object" title: "ExecConfig" properties: AttachStdin: type: "boolean" description: "Attach to `stdin` of the exec command." AttachStdout: type: "boolean" description: "Attach to `stdout` of the exec command." AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." DetachKeys: type: "string" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Tty: type: "boolean" description: "Allocate a pseudo-TTY." Env: description: | A list of environment variables in the form `["VAR=value", ...]`. type: "array" items: type: "string" Cmd: type: "array" description: "Command to run, as a string or array of strings." items: type: "string" Privileged: type: "boolean" description: "Runs the exec process with extended privileges." default: false User: type: "string" description: | The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`. WorkingDir: type: "string" description: | The working directory for the exec process inside the container. example: AttachStdin: false AttachStdout: true AttachStderr: true DetachKeys: "ctrl-p,ctrl-q" Tty: false Cmd: - "date" Env: - "FOO=bar" - "BAZ=quux" required: true - name: "id" in: "path" description: "ID or name of container" type: "string" required: true tags: ["Exec"] /exec/{id}/start: post: summary: "Start an exec instance" description: | Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command. operationId: "ExecStart" consumes: - "application/json" produces: - "application/vnd.docker.raw-stream" responses: 200: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Container is stopped or paused" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execStartConfig" in: "body" schema: type: "object" title: "ExecStartConfig" properties: Detach: type: "boolean" description: "Detach from the command." Tty: type: "boolean" description: "Allocate a pseudo-TTY." example: Detach: false Tty: false - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /exec/{id}/resize: post: summary: "Resize an exec instance" description: | Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance. 
operationId: "ExecResize" responses: 201: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" - name: "h" in: "query" description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] /exec/{id}/json: get: summary: "Inspect an exec instance" description: "Return low-level information about an exec instance." operationId: "ExecInspect" produces: - "application/json" responses: 200: description: "No error" schema: type: "object" title: "ExecInspectResponse" properties: CanRemove: type: "boolean" DetachKeys: type: "string" ID: type: "string" Running: type: "boolean" ExitCode: type: "integer" ProcessConfig: $ref: "#/definitions/ProcessConfig" OpenStdin: type: "boolean" OpenStderr: type: "boolean" OpenStdout: type: "boolean" ContainerID: type: "string" Pid: type: "integer" description: "The system process ID for the exec process." examples: application/json: CanRemove: false ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" DetachKeys: "" ExitCode: 2 ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" OpenStderr: true OpenStdin: true OpenStdout: true ProcessConfig: arguments: - "-c" - "exit 2" entrypoint: "sh" privileged: false tty: true user: "1000" Running: false Pid: 42000 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /volumes: get: summary: "List volumes" operationId: "VolumeList" produces: ["application/json"] responses: 200: description: "Summary volume data that matches the query" schema: type: "object" title: "VolumeListResponse" description: "Volume list response" required: [Volumes, Warnings] properties: Volumes: type: "array" x-nullable: false description: "List of volumes" items: $ref: "#/definitions/Volume" Warnings: type: "array" x-nullable: false description: | Warnings that occurred when fetching the list of volumes. items: type: "string" examples: application/json: Volumes: - CreatedAt: "2017-07-19T12:00:26Z" Name: "tardis" Driver: "local" Mountpoint: "/var/lib/docker/volumes/tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: "local" Options: device: "tmpfs" o: "size=100m,uid=1000" type: "tmpfs" Warnings: [] 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are not in use by a container. When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. - `driver=<volume-driver-name>` Matches volumes based on their driver. - `label=<key>` or `label=<key>:<value>` Matches volumes based on the presence of a `label` alone or a `label` and a value. - `name=<volume-name>` Matches all or part of a volume name. 
type: "string" format: "json" tags: ["Volume"] /volumes/create: post: summary: "Create a volume" operationId: "VolumeCreate" consumes: ["application/json"] produces: ["application/json"] responses: 201: description: "The volume was created successfully" schema: $ref: "#/definitions/Volume" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "volumeConfig" in: "body" required: true description: "Volume configuration" schema: type: "object" description: "Volume configuration" title: "VolumeConfig" properties: Name: description: | The new volume's name. If not specified, Docker generates a name. type: "string" x-nullable: false Driver: description: "Name of the volume driver to use." type: "string" default: "local" x-nullable: false DriverOpts: description: | A mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: Name: "tardis" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Driver: "custom" tags: ["Volume"] /volumes/{name}: get: summary: "Inspect a volume" operationId: "VolumeInspect" produces: ["application/json"] responses: 200: description: "No error" schema: $ref: "#/definitions/Volume" 404: description: "No such volume" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" tags: ["Volume"] delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." operationId: "VolumeDelete" responses: 204: description: "The volume was removed" 404: description: "No such volume or volume driver" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Volume is in use and cannot be removed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" - name: "force" in: "query" description: "Force the removal of the volume" type: "boolean" default: false tags: ["Volume"] /volumes/prune: post: summary: "Delete unused volumes" produces: - "application/json" operationId: "VolumePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: description: "No error" schema: type: "object" title: "VolumePruneResponse" properties: VolumesDeleted: description: "Volumes that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Volume"] /networks: get: summary: "List networks" description: | Returns a list of networks. For details on the format, see the [network inspect endpoint](#operation/NetworkInspect). Note that it uses a different, smaller representation of a network than inspecting a single network. 
For example, the list of containers attached to the network is not propagated in API versions 1.28 and up. operationId: "NetworkList" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Network" examples: application/json: - Name: "bridge" Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" Created: "2016-10-19T06:21:00.416543526Z" Scope: "local" Driver: "bridge" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: - Subnet: "172.17.0.0/16" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" - Name: "none" Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "null" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} - Name: "host" Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "host" EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all networks that are not in use by a container. When set to `false` (or `0`), only networks that are in use by one or more containers are returned. - `driver=<driver-name>` Matches a network's driver. - `id=<network-id>` Matches all or part of a network ID. - `label=<key>` or `label=<key>=<value>` of a network label. - `name=<network-name>` Matches all or part of a network name. - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
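A brief sketch of listing networks with a driver filter from Go, illustrating the `/networks` endpoint and filters described above; the `bridge` filter value is only an example.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// GET /networks, restricted to bridge-driver networks.
	f := filters.NewArgs()
	f.Add("driver", "bridge")
	nets, err := cli.NetworkList(ctx, types.NetworkListOptions{Filters: f})
	if err != nil {
		panic(err)
	}
	for _, n := range nets {
		fmt.Println(n.ID, n.Name, n.Scope)
	}
}
```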
type: "string" tags: ["Network"] /networks/{id}: get: summary: "Inspect a network" operationId: "NetworkInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Network" 404: description: "Network not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "verbose" in: "query" description: "Detailed inspect output for troubleshooting" type: "boolean" default: false - name: "scope" in: "query" description: "Filter the network by scope (swarm, global, or local)" type: "string" tags: ["Network"] delete: summary: "Remove a network" operationId: "NetworkDelete" responses: 204: description: "No error" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such network" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" tags: ["Network"] /networks/create: post: summary: "Create a network" operationId: "NetworkCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "No error" schema: type: "object" title: "NetworkCreateResponse" properties: Id: description: "The ID of the created network." type: "string" Warning: type: "string" example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "plugin not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "networkConfig" in: "body" description: "Network configuration" required: true schema: type: "object" title: "NetworkCreateRequest" required: ["Name"] properties: Name: description: "The network's name." type: "string" CheckDuplicate: description: | Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions. type: "boolean" Driver: description: "Name of the network driver plugin to use." type: "string" default: "bridge" Internal: description: "Restrict external access to the network." type: "boolean" Attachable: description: | Globally scoped network is manually attachable by regular containers from workers in swarm mode. type: "boolean" Ingress: description: | Ingress network is the network which provides the routing-mesh in swarm mode. type: "boolean" IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" Options: description: "Network specific options to be used by the drivers." type: "object" additionalProperties: type: "string" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" example: Name: "isolated_nw" CheckDuplicate: false Driver: "bridge" EnableIPv6: true IPAM: Driver: "default" Config: - Subnet: "172.20.0.0/16" IPRange: "172.20.10.0/24" Gateway: "172.20.10.11" - Subnet: "2001:db8:abcd::/64" Gateway: "2001:db8:abcd::1011" Options: foo: "bar" Internal: true Attachable: false Ingress: false Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" tags: ["Network"] /networks/{id}/connect: post: summary: "Connect a container to a network" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkConnectRequest" properties: Container: type: "string" description: "The ID or name of the container to connect to the network." EndpointConfig: $ref: "#/definitions/EndpointSettings" example: Container: "3613f73ba0e4" EndpointConfig: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" tags: ["Network"] /networks/{id}/disconnect: post: summary: "Disconnect a container from a network" operationId: "NetworkDisconnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkDisconnectRequest" properties: Container: type: "string" description: | The ID or name of the container to disconnect from the network. Force: type: "boolean" description: | Force the container to disconnect from the network. tags: ["Network"] /networks/prune: post: summary: "Delete unused networks" produces: - "application/json" operationId: "NetworkPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "NetworkPruneResponse" properties: NetworksDeleted: description: "Networks that were deleted" type: "array" items: type: "string" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Network"] /plugins: get: summary: "List plugins" operationId: "PluginList" description: "Returns information about installed plugins." produces: ["application/json"] responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Plugin" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - `capability=<capability name>` - `enable=<true>|<false>` tags: ["Plugin"] /plugins/privileges: get: summary: "Get plugin privileges" operationId: "GetPluginPrivileges" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: - "Plugin" /plugins/pull: post: summary: "Install a plugin" operationId: "PluginPull" description: | Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). produces: - "application/json" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | Remote reference for plugin to install. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "name" in: "query" description: | Local name for the pulled plugin. The `:latest` tag is optional, and is used as the default if omitted. required: false type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/{name}/json: get: summary: "Inspect a plugin" operationId: "PluginInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" tags: ["Plugin"] /plugins/{name}: delete: summary: "Remove a plugin" operationId: "PluginDelete" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "force" in: "query" description: | Disable the plugin before removing. This may result in issues if the plugin is in use by a container. type: "boolean" default: false tags: ["Plugin"] /plugins/{name}/enable: post: summary: "Enable a plugin" operationId: "PluginEnable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "timeout" in: "query" description: "Set the HTTP client timeout (in seconds)" type: "integer" default: 0 tags: ["Plugin"] /plugins/{name}/disable: post: summary: "Disable a plugin" operationId: "PluginDisable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: ["Plugin"] /plugins/{name}/upgrade: post: summary: "Upgrade a plugin" operationId: "PluginUpgrade" responses: 204: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "remote" in: "query" description: | Remote reference to upgrade to. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/create: post: summary: "Create a plugin" operationId: "PluginCreate" consumes: - "application/x-tar" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" - name: "tarContext" in: "body" description: "Path to tar containing plugin rootfs and manifest" schema: type: "string" format: "binary" tags: ["Plugin"] /plugins/{name}/push: post: summary: "Push a plugin" operationId: "PluginPush" description: | Push a plugin to the registry. parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" responses: 200: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /plugins/{name}/set: post: summary: "Configure a plugin" operationId: "PluginSet" consumes: - "application/json" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "body" in: "body" schema: type: "array" items: type: "string" example: ["DEBUG=1"] responses: 204: description: "No error" 404: description: "Plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /nodes: get: summary: "List nodes" operationId: "NodeList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Node" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). Available filters: - `id=<node id>` - `label=<engine label>` - `membership=`(`accepted`|`pending`)` - `name=<node name>` - `node.label=<node label>` - `role=`(`manager`|`worker`)` type: "string" tags: ["Node"] /nodes/{id}: get: summary: "Inspect a node" operationId: "NodeInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Node" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true tags: ["Node"] delete: summary: "Delete a node" operationId: "NodeDelete" responses: 200: description: "no error" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true - name: "force" in: "query" description: "Force remove a node from the swarm" default: false type: "boolean" tags: ["Node"] /nodes/{id}/update: post: summary: "Update a node" operationId: "NodeUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID of 
the node" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/NodeSpec" - name: "version" in: "query" description: | The version number of the node object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Node"] /swarm: get: summary: "Inspect swarm" operationId: "SwarmInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Swarm" 404: description: "no such swarm" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/init: post: summary: "Initialize a new swarm" operationId: "SwarmInit" produces: - "application/json" - "text/plain" responses: 200: description: "no error" schema: description: "The node ID" type: "string" example: "7v2t30z9blmxuhnyo6s4cpenp" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmInitRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used. type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. if no port is set or is set to 0, default port 4789 will be used. type: "integer" format: "uint32" DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" example: ["10.10.0.0/16", "20.20.0.0/16"] ForceNewCluster: description: "Force creation of a new swarm." type: "boolean" SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. 
type: "integer" format: "uint32" Spec: $ref: "#/definitions/SwarmSpec" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" DataPathPort: 4789 DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] SubnetSize: 24 ForceNewCluster: false Spec: Orchestration: {} Raft: {} Dispatcher: {} CAConfig: {} EncryptionConfig: AutoLockManagers: false tags: ["Swarm"] /swarm/join: post: summary: "Join an existing swarm" operationId: "SwarmJoin" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmJoinRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" RemoteAddrs: description: | Addresses of manager nodes already participating in the swarm. type: "array" items: type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" RemoteAddrs: - "node1:2377" JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" tags: ["Swarm"] /swarm/leave: post: summary: "Leave a swarm" operationId: "SwarmLeave" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "force" description: | Force leave swarm, even if this is the last manager or that it will break the cluster. in: "query" type: "boolean" default: false tags: ["Swarm"] /swarm/update: post: summary: "Update a swarm" operationId: "SwarmUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: $ref: "#/definitions/SwarmSpec" - name: "version" in: "query" description: | The version number of the swarm object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true - name: "rotateWorkerToken" in: "query" description: "Rotate the worker join token." type: "boolean" default: false - name: "rotateManagerToken" in: "query" description: "Rotate the manager join token." type: "boolean" default: false - name: "rotateManagerUnlockKey" in: "query" description: "Rotate the manager unlock key." type: "boolean" default: false tags: ["Swarm"] /swarm/unlockkey: get: summary: "Get the unlock key" operationId: "SwarmUnlockkey" consumes: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "UnlockKeyResponse" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/unlock: post: summary: "Unlock a locked manager" operationId: "SwarmUnlock" consumes: - "application/json" produces: - "application/json" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /services: get: summary: "List services" operationId: "ServiceList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Service" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - `id=<service id>` - `label=<service label>` - `mode=["replicated"|"global"]` - `name=<service name>` - name: "status" in: "query" type: "boolean" description: | Include service status, with count of running and desired tasks. tags: ["Service"] /services/create: post: summary: "Create a service" operationId: "ServiceCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: type: "object" title: "ServiceCreateResponse" properties: ID: description: "The ID of the created service." 
type: "string" Warning: description: "Optional warning message" type: "string" example: ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "network is not eligible for services" schema: $ref: "#/definitions/ErrorResponse" 409: description: "name conflicts with an existing service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "web" TaskTemplate: ContainerSpec: Image: "nginx:alpine" Mounts: - ReadOnly: true Source: "web-data" Target: "/usr/share/nginx/html" Type: "volume" VolumeOptions: DriverConfig: {} Labels: com.example.something: "something-value" Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] User: "33" DNSConfig: Nameservers: ["8.8.8.8"] Search: ["example.org"] Options: ["timeout:3"] Secrets: - File: Name: "www.example.org.key" UID: "33" GID: "33" Mode: 384 SecretID: "fpjqlhnwb19zds35k8wn80lq9" SecretName: "example_org_domain_key" LogDriver: Name: "json-file" Options: max-file: "3" max-size: "10M" Placement: {} Resources: Limits: MemoryBytes: 104857600 Reservations: {} RestartPolicy: Condition: "on-failure" Delay: 10000000000 MaxAttempts: 10 Mode: Replicated: Replicas: 4 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Ports: - Protocol: "tcp" PublishedPort: 8080 TargetPort: 80 Labels: foo: "bar" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}: get: summary: "Inspect a service" operationId: "ServiceInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Service" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "insertDefaults" in: "query" description: "Fill empty fields with default values." type: "boolean" default: false tags: ["Service"] delete: summary: "Delete a service" operationId: "ServiceDelete" responses: 200: description: "no error" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." 
required: true type: "string" tags: ["Service"] /services/{id}/update: post: summary: "Update a service" operationId: "ServiceUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/ServiceUpdateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "top" TaskTemplate: ContainerSpec: Image: "busybox" Args: - "top" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" - name: "version" in: "query" description: | The version number of the service object being updated. This is required to avoid conflicting writes. This version number should be the value as currently set on the service *before* the update. You can find the current version by calling `GET /services/{id}` required: true type: "integer" - name: "registryAuthFrom" in: "query" description: | If the `X-Registry-Auth` header is not specified, this parameter indicates where to find registry authorization credentials. type: "string" enum: ["spec", "previous-spec"] default: "spec" - name: "rollback" in: "query" description: | Set to this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case. type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}/logs: get: summary: "Get service logs" description: | Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "ServiceLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such service: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the service" type: "string" - name: "details" in: "query" description: "Show service context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Service"] /tasks: get: summary: "List tasks" operationId: "TaskList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Task" example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" - ID: "1yljwbmlr8er2waf8orvqpwms" Version: Index: 30 CreatedAt: "2016-06-07T21:07:30.019104782Z" UpdatedAt: "2016-06-07T21:07:30.231958098Z" Name: "hopeful_cori" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:30.202183143Z" State: "shutdown" Message: "shutdown" ContainerStatus: ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" DesiredState: "shutdown" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.5/16" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: - `desired-state=(running | shutdown | accepted)` - `id=<task id>` - `label=key` or `label="key=value"` - `name=<task name>` - `node=<node id or name>` - `service=<service name>` tags: ["Task"] /tasks/{id}: get: summary: "Inspect a task" operationId: "TaskInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Task" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID of the task" required: true type: "string" tags: ["Task"] /tasks/{id}/logs: get: summary: "Get task logs" description: | Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such task: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID of the task" type: "string" - name: "details" in: "query" description: "Show task context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Task"] /secrets: get: summary: "List secrets" operationId: "SecretList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Secret" example: - ID: "blt1owaxmitz71s9v5zh81zun" Version: Index: 85 CreatedAt: "2017-07-20T13:55:28.678958722Z" UpdatedAt: "2017-07-20T13:55:28.678958722Z" Spec: Name: "mysql-passwd" Labels: some.label: "some.value" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. 
Available filters: - `id=<secret id>` - `label=<key> or label=<key>=value` - `name=<secret name>` - `names=<secret name>` tags: ["Secret"] /secrets/create: post: summary: "Create a secret" operationId: "SecretCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/SecretSpec" - type: "object" example: Name: "app-key.crt" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" tags: ["Secret"] /secrets/{id}: get: summary: "Inspect a secret" operationId: "SecretInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Secret" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] delete: summary: "Delete a secret" operationId: "SecretDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] /secrets/{id}/update: post: summary: "Update a Secret" operationId: "SecretUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such secret" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the secret" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/SecretSpec" description: | The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values. - name: "version" in: "query" description: | The version number of the secret object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true tags: ["Secret"] /configs: get: summary: "List configs" operationId: "ConfigList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Config" example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "server.conf" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: - `id=<config id>` - `label=<key> or label=<key>=value` - `name=<config name>` - `names=<config name>` tags: ["Config"] /configs/create: post: summary: "Create a config" operationId: "ConfigCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/ConfigSpec" - type: "object" example: Name: "server.conf" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" tags: ["Config"] /configs/{id}: get: summary: "Inspect a config" operationId: "ConfigInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Config" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] delete: summary: "Delete a config" operationId: "ConfigDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] /configs/{id}/update: post: summary: "Update a Config" operationId: "ConfigUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such config" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the config" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/ConfigSpec" description: | The spec of the config to update. 
Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values. - name: "version" in: "query" description: | The version number of the config object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Config"] /distribution/{name}/json: get: summary: "Get image information from the registry" description: | Return image digest and platform information by contacting the registry. operationId: "DistributionInspect" produces: - "application/json" responses: 200: description: "descriptor and platform information" schema: $ref: "#/definitions/DistributionInspect" 401: description: "Failed authentication or no image found" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Distribution"] /session: post: summary: "Initialize interactive session" description: | Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. ### Hijacking This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. For example, the client sends this request to upgrade the connection: ``` POST /session HTTP/1.1 Upgrade: h2c Connection: Upgrade ``` The Docker daemon responds with a `101 UPGRADED` response follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Connection: Upgrade Upgrade: h2c ``` operationId: "Session" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hijacking successful" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Session"]
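The `/session` hijacking flow described at the end of the spec above is easiest to see with a raw client. The following is a minimal Go sketch, not part of the spec or of this record: it dials the daemon's local unix socket (the path `/var/run/docker.sock` is an assumption), sends the upgrade request shown in the endpoint description, and prints the status line, expecting `101 UPGRADED`. Error handling is reduced to panics for brevity.

```go
// Minimal sketch of the /session upgrade handshake over the daemon's unix
// socket. A raw net.Conn is used (rather than net/http) so the caller keeps
// the connection after the 101 response and can speak h2c/gRPC on it.
package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	// Assumed default daemon socket path.
	conn, err := net.Dial("unix", "/var/run/docker.sock")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The upgrade request from the endpoint description above.
	fmt.Fprintf(conn, "POST /session HTTP/1.1\r\nHost: docker\r\nUpgrade: h2c\r\nConnection: Upgrade\r\n\r\n")

	// Read and print the status line; "HTTP/1.1 101 UPGRADED" means the
	// connection is now a raw stream the client can expose gRPC services on.
	status, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		panic(err)
	}
	fmt.Print(status)
}
```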
thaJeztah
8fa6126f75f255805fb7f6ffa716d03b4cc7f76d
772e25fa9f00577ba9f6641530e5aad5ec5ff84c
Perhaps it's nice to add it; good suggestion. Let me add a link.
thaJeztah
4,493
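The `SwarmInit` request body documented in the spec above (ListenAddr, AdvertiseAddr, DefaultAddrPool, SubnetSize, and so on) can be exercised with a short Go program. This is a minimal sketch under stated assumptions, not taken from the record: the unix-socket path, the placeholder host name `docker`, and the unversioned request path are assumptions, and the field values only mirror the example in the spec. Real code should also read the response body, which carries the new node ID on success or an error message otherwise.

```go
// Minimal sketch: POST /swarm/init over the daemon's unix socket using a
// custom http.Transport. Field values mirror the SwarmInitRequest example
// in the spec above and are illustrative only.
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Route all HTTP requests over the local unix socket instead of TCP.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock") // assumed socket path
			},
		},
	}

	// Only the fields discussed above; omitted fields take their defaults.
	body, _ := json.Marshal(map[string]interface{}{
		"ListenAddr":      "0.0.0.0:2377",
		"AdvertiseAddr":   "192.168.1.1:2377",
		"DefaultAddrPool": []string{"10.10.0.0/16"},
		"SubnetSize":      24,
	})

	resp, err := client.Post("http://docker/swarm/init", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "200 OK"; the body then contains the node ID
}
```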
moby/moby
42,763
Dockerfile: update syntax, switch to bullseye, add missing libseccomp-dev, remove buildpack-deps
So, this started with the intention to "just" update `buster` to `bullseye`, but finding various issues that needed fixing, or could be improved. ### Dockerfile: update to docker/dockerfile:1.3, and remove temporary fix. I saw we were using an older syntax, and the issue I reported (https://github.com/moby/buildkit/issues/2114) was fixed in dockerfile:1.3 front-end, so upgrading allowed me to remove the temporary fix. ### Dockerfile: remove aufs-tools, as it's not available on bullseye Well, title says all. No more aufs? ### Dockerfile: update to debian bullseye Well, that's what I came here for 😂 ### Dockerfile: add back libseccomp-dev to cross-compile runc Commit https://github.com/moby/moby/commit/7168d98c434af0a35e8c9a05dfb87bb40511a38a removed these, but I _think_ we overlooked that the same stage is used to build runc as well, so we likely need these. (but happy to remove if we really don't need them!) ### Dockerfile: frozen images: update to bullseye, remove buildpack-dep Update the frozen images to also be based on Debian bullseye. Using the "slim" variant (which looks to have all we're currently using), and remove the buildpack-dep frozen image. The buildpack-dep image is quite large, and it looks like we only use it to compile some C binaries, which should work fine on a regular debian image; docker build -t debian:bullseye-slim-gcc -<<EOF FROM debian:bullseye-slim RUN apt-get update && apt-get install -y gcc libc6-dev --no-install-recommends EOF docker image ls REPOSITORY TAG IMAGE ID CREATED SIZE debian bullseye-slim-gcc 1851750242af About a minute ago 255MB buildpack-deps bullseye fe8fece98de2 2 days ago 834MB **- How to verify it** **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-08-19 22:01:49+00:00
2021-08-22 13:37:00+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.16.7 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc # FIXME: temporarily doing a manual chmod as workaround for https://github.com/moby/buildkit/issues/2114 RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ chmod 0644 /etc/apt/trusted.gpg.d/criu.gpg.asc \ && echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev/buster-backports \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll ARG GOTOML_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \ . 
/tmp/install/tomll.installer && PREFIX=/build install_tomll FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ 
&& chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here 
because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.3 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.16.7 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="bullseye" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems, so other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf \ libseccomp-dev:ppc64el \ libseccomp-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll ARG GOTOML_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \ . 
/tmp/install/tomll.installer && PREFIX=/build install_tomll FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ 
&& chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ bash-completion \ bzip2 \ inetutils-ping \ iproute2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This 
is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
thaJeztah
9bc0c4903f7f02ce287b9918f64795368e507f9d
2f74fa543b2f9ed6a3fd7c96afe3faa57b45f7e7
```suggestion
inetutils-ping \
```
:eyes:
tianon
4,494
moby/moby
42,763
Dockerfile: update syntax, switch to bullseye, add missing libseccomp-dev, remove build pack
So, this started with the intention to "just" update `buster` to `bullseye`, but along the way I found various issues that needed fixing or could be improved.

### Dockerfile: update to docker/dockerfile:1.3, and remove temporary fix

I saw we were using an older syntax, and the issue I reported (https://github.com/moby/buildkit/issues/2114) was fixed in the dockerfile:1.3 front-end, so upgrading allowed me to remove the temporary fix.

### Dockerfile: remove aufs-tools, as it's not available on bullseye

Well, the title says it all. No more aufs?

### Dockerfile: update to debian bullseye

Well, that's what I came here for 😂

### Dockerfile: add back libseccomp-dev to cross-compile runc

Commit https://github.com/moby/moby/commit/7168d98c434af0a35e8c9a05dfb87bb40511a38a removed these, but I _think_ we overlooked that the same stage is used to build runc as well, so we likely need these. (But happy to remove them if we really don't need them!)

### Dockerfile: frozen images: update to bullseye, remove buildpack-deps

Update the frozen images to also be based on Debian bullseye, using the "slim" variant (which looks to have all we're currently using), and remove the buildpack-deps frozen image. The buildpack-deps image is quite large, and it looks like we only use it to compile some C binaries, which should work fine on a regular debian image:

    docker build -t debian:bullseye-slim-gcc -<<EOF
    FROM debian:bullseye-slim
    RUN apt-get update && apt-get install -y gcc libc6-dev --no-install-recommends
    EOF

    docker image ls
    REPOSITORY       TAG                 IMAGE ID       CREATED              SIZE
    debian           bullseye-slim-gcc   1851750242af   About a minute ago   255MB
    buildpack-deps   bullseye            fe8fece98de2   2 days ago           834MB

**- How to verify it**

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

**- A picture of a cute animal (not mandatory but encouraged)**
null
2021-08-19 22:01:49+00:00
2021-08-22 13:37:00+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.16.7 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc # FIXME: temporarily doing a manual chmod as workaround for https://github.com/moby/buildkit/issues/2114 RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ chmod 0644 /etc/apt/trusted.gpg.d/criu.gpg.asc \ && echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev/buster-backports \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll ARG GOTOML_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \ . 
/tmp/install/tomll.installer && PREFIX=/build install_tomll FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ 
&& chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here 
because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.3 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.16.7 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="bullseye" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems, so other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf \ libseccomp-dev:ppc64el \ libseccomp-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll ARG GOTOML_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \ . 
/tmp/install/tomll.installer && PREFIX=/build install_tomll FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ 
&& chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ bash-completion \ bzip2 \ inetutils-ping \ iproute2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This 
is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
thaJeztah
9bc0c4903f7f02ce287b9918f64795368e507f9d
2f74fa543b2f9ed6a3fd7c96afe3faa57b45f7e7
🙈
thaJeztah
4,495
moby/moby
42,763
Dockerfile: update syntax, switch to bullseye, add missing libseccomp-dev, remove build pack
So, this started with the intention to "just" update `buster` to `bullseye`, but along the way I found various issues that needed fixing or could be improved.

### Dockerfile: update to docker/dockerfile:1.3, and remove temporary fix

I saw we were using an older syntax, and the issue I reported (https://github.com/moby/buildkit/issues/2114) was fixed in the dockerfile:1.3 front-end, so upgrading allowed me to remove the temporary fix.

### Dockerfile: remove aufs-tools, as it's not available on bullseye

Well, the title says it all. No more aufs?

### Dockerfile: update to debian bullseye

Well, that's what I came here for 😂

### Dockerfile: add back libseccomp-dev to cross-compile runc

Commit https://github.com/moby/moby/commit/7168d98c434af0a35e8c9a05dfb87bb40511a38a removed these, but I _think_ we overlooked that the same stage is used to build runc as well, so we likely need these. (But happy to remove them if we really don't need them!)

### Dockerfile: frozen images: update to bullseye, remove buildpack-deps

Update the frozen images to also be based on Debian bullseye, using the "slim" variant (which looks to have all we're currently using), and remove the buildpack-deps frozen image. The buildpack-deps image is quite large, and it looks like we only use it to compile some C binaries, which should work fine on a regular debian image:

    docker build -t debian:bullseye-slim-gcc -<<EOF
    FROM debian:bullseye-slim
    RUN apt-get update && apt-get install -y gcc libc6-dev --no-install-recommends
    EOF

    docker image ls
    REPOSITORY       TAG                 IMAGE ID       CREATED              SIZE
    debian           bullseye-slim-gcc   1851750242af   About a minute ago   255MB
    buildpack-deps   bullseye            fe8fece98de2   2 days ago           834MB

**- How to verify it**

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

**- A picture of a cute animal (not mandatory but encouraged)**
null
2021-08-19 22:01:49+00:00
2021-08-22 13:37:00+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.16.7 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc # FIXME: temporarily doing a manual chmod as workaround for https://github.com/moby/buildkit/issues/2114 RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ chmod 0644 /etc/apt/trusted.gpg.d/criu.gpg.asc \ && echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev/buster-backports \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll ARG GOTOML_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \ . 
/tmp/install/tomll.installer && PREFIX=/build install_tomll FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ 
&& chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here 
because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.3 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.16.7 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="bullseye" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems, so other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf \ libseccomp-dev:ppc64el \ libseccomp-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll ARG GOTOML_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \ . 
/tmp/install/tomll.installer && PREFIX=/build install_tomll FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ 
&& chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ bash-completion \ bzip2 \ inetutils-ping \ iproute2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This 
is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
thaJeztah
9bc0c4903f7f02ce287b9918f64795368e507f9d
2f74fa543b2f9ed6a3fd7c96afe3faa57b45f7e7
Should we add something like `TODO update to Debian_11` or just not worry about it? (It's probably fine.)
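For context, a minimal sketch of what the suggested note could look like in the criu stage is below. This is only an illustration: the wording of the TODO is mine, not from the PR, the "still works fine on bullseye" remark simply echoes the reviewer's "(It's probably fine.)", and the stage is trimmed (the real one also uses apt cache mounts, omitted here for brevity).

```dockerfile
FROM base AS criu
ARG DEBIAN_FRONTEND
# TODO update to Debian_11 once the OBS criu repository publishes packages for it;
# the Debian_10 packages still work fine on bullseye for now.
ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc
RUN echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \
    && apt-get update \
    && apt-get install -y --no-install-recommends criu \
    && install -D /usr/sbin/criu /build/criu
```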
tianon
4,496
moby/moby
42,763
Dockerfile: update syntax, switch to bullseye, add missing libseccomp-dev, remove build pack
So, this started with the intention to "just" update `buster` to `bullseye`, but along the way I found various issues that needed fixing or could be improved.

### Dockerfile: update to docker/dockerfile:1.3, and remove temporary fix

We were using an older syntax, and the issue I reported (https://github.com/moby/buildkit/issues/2114) was fixed in the dockerfile:1.3 front-end, so upgrading allowed me to remove the temporary fix.

### Dockerfile: remove aufs-tools, as it's not available on bullseye

The title says it all. No more aufs?

### Dockerfile: update to Debian bullseye

Well, that's what I came here for 😂

### Dockerfile: add back libseccomp-dev to cross-compile runc

Commit https://github.com/moby/moby/commit/7168d98c434af0a35e8c9a05dfb87bb40511a38a removed these, but I _think_ we overlooked that the same stage is used to build runc as well, so we likely need them. (But happy to remove them if we really don't!)

### Dockerfile: frozen images: update to bullseye, remove buildpack-deps

Update the frozen images to also be based on Debian bullseye, using the "slim" variant (which looks to have all we're currently using), and remove the buildpack-deps frozen image. The buildpack-deps image is quite large, and it looks like we only use it to compile some C binaries, which should work fine on a regular Debian image:

```bash
docker build -t debian:bullseye-slim-gcc -<<EOF
FROM debian:bullseye-slim
RUN apt-get update && apt-get install -y gcc libc6-dev --no-install-recommends
EOF

docker image ls
REPOSITORY       TAG                 IMAGE ID       CREATED              SIZE
debian           bullseye-slim-gcc   1851750242af   About a minute ago   255MB
buildpack-deps   bullseye            fe8fece98de2   2 days ago           834MB
```

**- How to verify it**

**- Description for the changelog**

<!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->

**- A picture of a cute animal (not mandatory but encouraged)**
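To make the buildpack-deps point a bit more concrete, here is a minimal, self-contained sketch of the same idea. It is illustrative only: this stage is not part of the actual moby Dockerfile, and the trivial C program stands in for whatever C binaries the tests really compile.

```dockerfile
# Illustrative sketch: a plain slim Debian image plus gcc/libc6-dev is enough
# to compile a small C program, without pulling in the much larger buildpack-deps.
FROM debian:bullseye-slim
RUN apt-get update \
    && apt-get install -y --no-install-recommends gcc libc6-dev \
    && rm -rf /var/lib/apt/lists/*
# Compile and run a trivial program to confirm the toolchain works.
RUN printf 'int main(void){return 0;}\n' > /tmp/hello.c \
    && gcc -O2 -o /usr/local/bin/hello /tmp/hello.c \
    && /usr/local/bin/hello
```

Building that and comparing `docker image ls` output against `buildpack-deps:bullseye` should show roughly the size difference quoted above (several hundred MB smaller).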
null
2021-08-19 22:01:49+00:00
2021-08-22 13:37:00+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.16.7 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc # FIXME: temporarily doing a manual chmod as workaround for https://github.com/moby/buildkit/issues/2114 RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ chmod 0644 /etc/apt/trusted.gpg.d/criu.gpg.asc \ && echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev/buster-backports \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll ARG GOTOML_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \ . 
/tmp/install/tomll.installer && PREFIX=/build install_tomll FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ 
&& chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here 
because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.3 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.16.7 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="bullseye" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ && apt-get update \ && apt-get install -y --no-install-recommends criu \ && install -D /usr/sbin/criu /build/criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN dpkg --add-architecture ppc64el RUN dpkg --add-architecture s390x RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf \ crossbuild-essential-ppc64el \ crossbuild-essential-s390x FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems, so other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libapparmor-dev:ppc64el \ libapparmor-dev:s390x \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf \ libseccomp-dev:ppc64el \ libseccomp-dev:s390x FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomll ARG GOTOML_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \ . 
/tmp/install/tomll.installer && PREFIX=/build install_tomll FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ 
&& chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ bash-completion \ bzip2 \ inetutils-ping \ iproute2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ patch \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.26.1 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomll /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/bin/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This 
is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
thaJeztah
9bc0c4903f7f02ce287b9918f64795368e507f9d
2f74fa543b2f9ed6a3fd7c96afe3faa57b45f7e7
I'm hoping @kolyshkin will remind me at that time; https://github.com/opencontainers/runc/pull/3171#discussion_r692213776 😁
thaJeztah
4,497